@mastra/mcp-docs-server 1.0.0-beta.10 → 1.0.0-beta.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (168)
  1. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +12 -12
  2. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +50 -50
  3. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +10 -10
  4. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +45 -45
  5. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +109 -109
  6. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +39 -39
  7. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +39 -39
  8. package/.docs/organized/changelogs/%40mastra%2Fconvex.md +38 -0
  9. package/.docs/organized/changelogs/%40mastra%2Fcore.md +264 -264
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +25 -25
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +37 -37
  12. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +39 -39
  13. package/.docs/organized/changelogs/%40mastra%2Ffastembed.md +6 -0
  14. package/.docs/organized/changelogs/%40mastra%2Flance.md +39 -39
  15. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +45 -45
  16. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +22 -22
  17. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +13 -13
  18. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +39 -39
  19. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +39 -39
  20. package/.docs/organized/changelogs/%40mastra%2Fpg.md +45 -45
  21. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +104 -104
  22. package/.docs/organized/changelogs/%40mastra%2Freact.md +66 -0
  23. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
  24. package/.docs/organized/changelogs/%40mastra%2Fserver.md +59 -59
  25. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +39 -39
  26. package/.docs/organized/changelogs/create-mastra.md +31 -31
  27. package/.docs/organized/changelogs/mastra.md +49 -49
  28. package/.docs/organized/code-examples/quick-start.md +0 -4
  29. package/.docs/organized/code-examples/stock-price-tool.md +21 -2
  30. package/.docs/raw/agents/agent-approval.mdx +136 -2
  31. package/.docs/raw/agents/agent-memory.mdx +4 -4
  32. package/.docs/raw/agents/guardrails.mdx +44 -7
  33. package/.docs/raw/agents/networks.mdx +1 -1
  34. package/.docs/raw/agents/overview.mdx +2 -2
  35. package/.docs/raw/agents/processors.mdx +151 -0
  36. package/.docs/raw/agents/using-tools.mdx +1 -1
  37. package/.docs/raw/course/01-first-agent/07-creating-your-agent.md +1 -2
  38. package/.docs/raw/course/01-first-agent/12-connecting-tool-to-agent.md +1 -1
  39. package/.docs/raw/course/01-first-agent/16-adding-memory-to-agent.md +1 -2
  40. package/.docs/raw/course/02-agent-tools-mcp/05-updating-your-agent.md +1 -1
  41. package/.docs/raw/course/02-agent-tools-mcp/10-updating-agent-instructions-zapier.md +1 -1
  42. package/.docs/raw/course/02-agent-tools-mcp/16-updating-agent-instructions-github.md +1 -1
  43. package/.docs/raw/course/02-agent-tools-mcp/21-updating-agent-instructions-hackernews.md +1 -1
  44. package/.docs/raw/course/02-agent-tools-mcp/27-updating-agent-instructions-filesystem.md +1 -1
  45. package/.docs/raw/course/02-agent-tools-mcp/31-enhancing-memory-configuration.md +2 -2
  46. package/.docs/raw/course/03-agent-memory/04-creating-basic-memory-agent.md +1 -2
  47. package/.docs/raw/course/03-agent-memory/08-configuring-conversation-history.md +1 -2
  48. package/.docs/raw/course/03-agent-memory/16-configuring-semantic-recall.md +3 -4
  49. package/.docs/raw/course/03-agent-memory/21-configuring-working-memory.md +2 -3
  50. package/.docs/raw/course/03-agent-memory/22-custom-working-memory-templates.md +2 -3
  51. package/.docs/raw/course/03-agent-memory/25-combining-memory-features.md +1 -2
  52. package/.docs/raw/course/03-agent-memory/27-creating-learning-assistant.md +2 -3
  53. package/.docs/raw/course/04-workflows/11-creating-an-ai-agent.md +2 -3
  54. package/.docs/raw/deployment/cloud-providers.mdx +20 -0
  55. package/.docs/raw/deployment/{building-mastra.mdx → mastra-server.mdx} +2 -2
  56. package/.docs/raw/deployment/monorepo.mdx +23 -44
  57. package/.docs/raw/deployment/overview.mdx +28 -53
  58. package/.docs/raw/deployment/web-framework.mdx +12 -14
  59. package/.docs/raw/getting-started/mcp-docs-server.mdx +57 -0
  60. package/.docs/raw/getting-started/start.mdx +10 -1
  61. package/.docs/raw/getting-started/studio.mdx +25 -2
  62. package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx +1021 -67
  63. package/.docs/raw/{deployment/cloud-providers → guides/deployment}/aws-lambda.mdx +3 -6
  64. package/.docs/raw/{deployment/cloud-providers → guides/deployment}/azure-app-services.mdx +4 -6
  65. package/.docs/raw/{deployment/cloud-providers → guides/deployment}/cloudflare-deployer.mdx +4 -0
  66. package/.docs/raw/{deployment/cloud-providers → guides/deployment}/digital-ocean.mdx +3 -6
  67. package/.docs/raw/guides/deployment/index.mdx +32 -0
  68. package/.docs/raw/{deployment/cloud-providers → guides/deployment}/netlify-deployer.mdx +4 -0
  69. package/.docs/raw/{deployment/cloud-providers → guides/deployment}/vercel-deployer.mdx +4 -0
  70. package/.docs/raw/guides/getting-started/express.mdx +71 -152
  71. package/.docs/raw/guides/getting-started/hono.mdx +227 -0
  72. package/.docs/raw/guides/getting-started/next-js.mdx +173 -63
  73. package/.docs/raw/guides/getting-started/vite-react.mdx +307 -137
  74. package/.docs/raw/guides/guide/research-assistant.mdx +4 -4
  75. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +70 -0
  76. package/.docs/raw/guides/migrations/upgrade-to-v1/client.mdx +17 -0
  77. package/.docs/raw/guides/migrations/upgrade-to-v1/overview.mdx +6 -0
  78. package/.docs/raw/index.mdx +1 -1
  79. package/.docs/raw/{deployment/mastra-cloud → mastra-cloud}/dashboard.mdx +2 -6
  80. package/.docs/raw/{deployment/mastra-cloud → mastra-cloud}/observability.mdx +1 -5
  81. package/.docs/raw/{deployment/mastra-cloud → mastra-cloud}/overview.mdx +2 -6
  82. package/.docs/raw/{deployment/mastra-cloud → mastra-cloud}/setting-up.mdx +3 -6
  83. package/.docs/raw/memory/overview.mdx +1 -1
  84. package/.docs/raw/memory/storage/memory-with-libsql.mdx +1 -1
  85. package/.docs/raw/memory/storage/memory-with-mongodb.mdx +1 -1
  86. package/.docs/raw/memory/storage/memory-with-pg.mdx +1 -1
  87. package/.docs/raw/memory/storage/memory-with-upstash.mdx +1 -1
  88. package/.docs/raw/{server-db/storage.mdx → memory/storage/overview.mdx} +2 -2
  89. package/.docs/raw/observability/logging.mdx +1 -1
  90. package/.docs/raw/observability/tracing/exporters/cloud.mdx +1 -1
  91. package/.docs/raw/observability/tracing/exporters/default.mdx +1 -1
  92. package/.docs/raw/rag/chunking-and-embedding.mdx +12 -25
  93. package/.docs/raw/rag/graph-rag.mdx +220 -0
  94. package/.docs/raw/rag/overview.mdx +1 -2
  95. package/.docs/raw/rag/retrieval.mdx +13 -29
  96. package/.docs/raw/rag/vector-databases.mdx +7 -3
  97. package/.docs/raw/reference/agents/agent.mdx +11 -4
  98. package/.docs/raw/reference/agents/getDefaultGenerateOptions.mdx +1 -1
  99. package/.docs/raw/reference/agents/getDefaultOptions.mdx +1 -1
  100. package/.docs/raw/reference/agents/getDefaultStreamOptions.mdx +1 -1
  101. package/.docs/raw/reference/agents/getInstructions.mdx +1 -1
  102. package/.docs/raw/reference/agents/getLLM.mdx +1 -1
  103. package/.docs/raw/reference/agents/getMemory.mdx +1 -1
  104. package/.docs/raw/reference/agents/getModel.mdx +1 -1
  105. package/.docs/raw/reference/agents/listScorers.mdx +1 -1
  106. package/.docs/raw/reference/ai-sdk/chat-route.mdx +1 -1
  107. package/.docs/raw/reference/ai-sdk/handle-chat-stream.mdx +1 -1
  108. package/.docs/raw/reference/ai-sdk/handle-network-stream.mdx +1 -1
  109. package/.docs/raw/reference/ai-sdk/handle-workflow-stream.mdx +1 -1
  110. package/.docs/raw/reference/ai-sdk/network-route.mdx +1 -1
  111. package/.docs/raw/reference/ai-sdk/to-ai-sdk-v4-messages.mdx +127 -0
  112. package/.docs/raw/reference/ai-sdk/to-ai-sdk-v5-messages.mdx +107 -0
  113. package/.docs/raw/reference/ai-sdk/workflow-route.mdx +1 -1
  114. package/.docs/raw/reference/auth/auth0.mdx +1 -1
  115. package/.docs/raw/reference/auth/clerk.mdx +1 -1
  116. package/.docs/raw/reference/auth/firebase.mdx +1 -1
  117. package/.docs/raw/reference/auth/jwt.mdx +1 -1
  118. package/.docs/raw/reference/auth/supabase.mdx +1 -1
  119. package/.docs/raw/reference/auth/workos.mdx +1 -1
  120. package/.docs/raw/reference/cli/mastra.mdx +1 -1
  121. package/.docs/raw/reference/client-js/mastra-client.mdx +1 -1
  122. package/.docs/raw/reference/client-js/workflows.mdx +20 -0
  123. package/.docs/raw/reference/core/getServer.mdx +3 -3
  124. package/.docs/raw/reference/core/getStorage.mdx +1 -1
  125. package/.docs/raw/reference/core/getStoredAgentById.mdx +1 -1
  126. package/.docs/raw/reference/core/listStoredAgents.mdx +1 -1
  127. package/.docs/raw/reference/core/setStorage.mdx +1 -1
  128. package/.docs/raw/reference/logging/pino-logger.mdx +1 -1
  129. package/.docs/raw/reference/processors/processor-interface.mdx +314 -13
  130. package/.docs/raw/reference/rag/database-config.mdx +1 -1
  131. package/.docs/raw/reference/server/create-route.mdx +1 -1
  132. package/.docs/raw/reference/server/express-adapter.mdx +4 -4
  133. package/.docs/raw/reference/server/hono-adapter.mdx +4 -4
  134. package/.docs/raw/reference/server/mastra-server.mdx +2 -2
  135. package/.docs/raw/reference/server/routes.mdx +28 -1
  136. package/.docs/raw/reference/streaming/ChunkType.mdx +23 -2
  137. package/.docs/raw/reference/streaming/agents/stream.mdx +38 -29
  138. package/.docs/raw/reference/streaming/workflows/stream.mdx +33 -20
  139. package/.docs/raw/reference/tools/create-tool.mdx +23 -1
  140. package/.docs/raw/reference/tools/graph-rag-tool.mdx +3 -3
  141. package/.docs/raw/reference/tools/vector-query-tool.mdx +3 -3
  142. package/.docs/raw/reference/workflows/run-methods/startAsync.mdx +143 -0
  143. package/.docs/raw/reference/workflows/workflow-methods/create-run.mdx +35 -0
  144. package/.docs/raw/reference/workflows/workflow-methods/foreach.mdx +68 -3
  145. package/.docs/raw/reference/workflows/workflow.mdx +37 -0
  146. package/.docs/raw/{auth → server/auth}/auth0.mdx +1 -1
  147. package/.docs/raw/{auth → server/auth}/clerk.mdx +1 -1
  148. package/.docs/raw/{auth → server/auth}/firebase.mdx +1 -1
  149. package/.docs/raw/{auth → server/auth}/index.mdx +6 -6
  150. package/.docs/raw/{auth → server/auth}/jwt.mdx +1 -1
  151. package/.docs/raw/{auth → server/auth}/supabase.mdx +1 -1
  152. package/.docs/raw/{auth → server/auth}/workos.mdx +1 -1
  153. package/.docs/raw/{server-db → server}/custom-adapters.mdx +3 -3
  154. package/.docs/raw/{server-db → server}/custom-api-routes.mdx +1 -1
  155. package/.docs/raw/{server-db → server}/mastra-client.mdx +2 -2
  156. package/.docs/raw/{server-db → server}/mastra-server.mdx +12 -10
  157. package/.docs/raw/{server-db → server}/middleware.mdx +2 -2
  158. package/.docs/raw/{server-db → server}/request-context.mdx +3 -3
  159. package/.docs/raw/{server-db → server}/server-adapters.mdx +6 -6
  160. package/.docs/raw/tools-mcp/overview.mdx +2 -2
  161. package/.docs/raw/workflows/control-flow.mdx +348 -2
  162. package/.docs/raw/workflows/error-handling.mdx +162 -1
  163. package/.docs/raw/workflows/overview.mdx +2 -2
  164. package/CHANGELOG.md +21 -0
  165. package/package.json +5 -5
  166. package/.docs/organized/changelogs/%40internal%2Fai-sdk-v4.md +0 -1
  167. package/.docs/raw/deployment/cloud-providers/index.mdx +0 -55
  168. package/.docs/raw/{deployment/cloud-providers → guides/deployment}/amazon-ec2.mdx +0 -0
@@ -5,6 +5,12 @@ description: "Overview of breaking changes when upgrading to Mastra v1."
 
 # Upgrade to Mastra v1
 
+ :::info[First update to latest 0.x version]
+ Before upgrading to v1, make sure you've updated to the latest 0.x version of Mastra. Follow the [upgrade to latest 0.x guide](https://mastra.ai/guides/migrations/upgrade-to-latest-0x) first, then return here to complete the v1 migration.
+ :::
+
+ Mastra v1 is coming in January 2026. We recommend starting any new projects with the beta, or upgrading your existing project today to get ahead.
+
 This guide provides a comprehensive overview of breaking changes when upgrading from Mastra 0.x to v1.0. The migration is organized by package and feature area to help you systematically update your codebase.
 
 :::tip[Need help?]
@@ -27,7 +27,7 @@ Some highlights include:
 
 - [**Workflows**](/docs/v1/workflows/overview) - When you need explicit control over execution, use Mastra's graph-based workflow engine to orchestrate complex multi-step processes. Mastra workflows use an intuitive syntax for control flow (`.then()`, `.branch()`, `.parallel()`).
 
- - [**Human-in-the-loop**](/docs/v1/workflows/suspend-and-resume) - Suspend an agent or workflow and await user input or approval before resuming. Mastra uses [storage](/docs/v1/server-db/storage) to remember execution state, so you can pause indefinitely and resume where you left off.
+ - [**Human-in-the-loop**](/docs/v1/workflows/suspend-and-resume) - Suspend an agent or workflow and await user input or approval before resuming. Mastra uses [storage](/docs/v1/memory/storage/overview) to remember execution state, so you can pause indefinitely and resume where you left off.
 
 - **Context management** - Give your agents the right context at the right time. Provide [conversation history](/docs/v1/memory/conversation-history), [retrieve](/docs/v1/rag/overview) data from your sources (APIs, databases, files), and add human-like [working](/docs/v1/memory/working-memory) and [semantic](/docs/v1/memory/semantic-recall) memory so your agents behave coherently.
 
@@ -3,13 +3,9 @@ title: "Navigating the Dashboard | Mastra Cloud"
 description: Details of each feature available in Mastra Cloud
 ---
 
- import { MastraCloudCallout } from "@site/src/components/MastraCloudCallout";
-
 # Navigating the Dashboard
 
- This page explains how to navigate the Mastra Cloud dashboard, where you can configure your project, view deployment details, and interact with agents and workflows using the built-in [Studio](/docs/v1/deployment/mastra-cloud/dashboard#studio).
-
- <MastraCloudCallout />
+ This page explains how to navigate the Mastra Cloud dashboard, where you can configure your project, view deployment details, and interact with agents and workflows using the built-in [Studio](/docs/v1/mastra-cloud/dashboard#studio).
 
 ## Overview
 
@@ -97,4 +93,4 @@ Each MCP Server includes API endpoints for HTTP and SSE, along with IDE configur
 
 ## Next steps
 
- - [Understanding Tracing and Logs](/docs/v1/deployment/mastra-cloud/observability)
+ - [Understanding Tracing and Logs](/docs/v1/mastra-cloud/observability)
@@ -3,16 +3,12 @@ title: "Understanding Tracing and Logs | Mastra Cloud"
 description: Monitoring and debugging tools for Mastra Cloud deployments
 ---
 
- import { MastraCloudCallout } from "@site/src/components/MastraCloudCallout";
-
 # Understanding Tracing and Logs
 
 Mastra Cloud provides full observability for production applications, giving you insight into how your agents and workflows behave. Observability can be enabled whether your application is deployed to Mastra Cloud, running locally, or hosted on your own infrastructure. Any Mastra project can send traces and logs to the platform regardless of where it's running.
 
 For details on configuring observability, see the [Cloud Exporter](/docs/v1/observability/tracing/exporters/cloud) docs.
 
- <MastraCloudCallout />
-
 ## Traces
 
 More detailed traces are available for both agents and workflows by enabling [observability](/docs/v1/observability/tracing/overview) using one of our [supported providers](/docs/v1/observability/tracing/overview#exporters).
@@ -39,7 +35,7 @@ Workflow traces capture each step in the run, including transitions, branching,
 
 ## Logs
 
- You can view detailed logs for debugging and monitoring your application's behavior on the [Logs](/docs/v1/deployment/mastra-cloud/dashboard#logs) page of the Dashboard.
+ You can view detailed logs for debugging and monitoring your application's behavior on the [Logs](/docs/v1/mastra-cloud/dashboard#logs) page of the Dashboard.
 
 ![Dashboard logs](/img/mastra-cloud/mastra-cloud-dashboard-logs.jpg)
 
@@ -3,13 +3,9 @@ title: "Mastra Cloud | Mastra Cloud"
 description: Deployment and monitoring service for Mastra applications
 ---
 
- import { MastraCloudCallout } from "@site/src/components/MastraCloudCallout";
-
 # Mastra Cloud
 
- [Mastra Cloud](https://mastra.ai/cloud) is a platform for deploying, managing, monitoring, and debugging Mastra applications. When you [deploy](/docs/v1/deployment/mastra-cloud/setting-up) your application, Mastra Cloud exposes your agents, tools, and workflows as REST API endpoints.
-
- <MastraCloudCallout />
+ [Mastra Cloud](https://mastra.ai/cloud) is a platform for deploying, managing, monitoring, and debugging Mastra applications. When you [deploy](/docs/v1/mastra-cloud/setting-up) your application, Mastra Cloud exposes your agents, tools, and workflows as REST API endpoints.
 
 ## Platform features
 
@@ -62,4 +58,4 @@ Mastra Cloud is purpose-built for Mastra agents, tools, and workflows. It handle
 
 ## Next steps
 
- - [Setting Up and Deploying](/docs/v1/deployment/mastra-cloud/setting-up)
+ - [Setting Up and Deploying](/docs/v1/mastra-cloud/setting-up)
@@ -3,7 +3,6 @@ title: "Setting Up and Deploying | Mastra Cloud"
 description: Configuration steps for Mastra Cloud projects
 ---
 
- import { MastraCloudCallout } from "@site/src/components/MastraCloudCallout";
 import Steps from "@site/src/components/Steps";
 import StepItem from "@site/src/components/StepItem";
 
@@ -11,8 +10,6 @@ import StepItem from "@site/src/components/StepItem";
 
 This page explains how to set up a project on [Mastra Cloud](https://mastra.ai/cloud) with automatic deployments using our GitHub integration.
 
- <MastraCloudCallout />
-
 ## Prerequisites
 
 - A [Mastra Cloud](https://mastra.ai/cloud) account
@@ -73,7 +70,7 @@
 - **Install command**: Runs pre-build to install project dependencies
 - **Project setup command**: Runs pre-build to prepare any external dependencies
 - **Port**: The network port the server will use
- - **Store settings**: Use Mastra Cloud's built-in [LibSQLStore](/docs/v1/server-db/storage) storage
+ - **Store settings**: Use Mastra Cloud's built-in [LibSQLStore](/docs/v1/memory/storage/overview) storage
 - **Deploy Project**: Starts the deployment process
 
 </StepItem>
@@ -98,8 +95,8 @@ Your project is now configured with automatic deployments which occur whenever y
 
 ## Testing your application
 
- After a successful deployment you can test your agents and workflows [Studio](/docs/v1/deployment/mastra-cloud/dashboard#studio) in Mastra Cloud, or interact with them using our [Client SDK](/docs/v1/server-db/mastra-client).
+ After a successful deployment you can test your agents and workflows [Studio](/docs/v1/mastra-cloud/dashboard#studio) in Mastra Cloud, or interact with them using our [Client SDK](/docs/v1/server/mastra-client).
 
 ## Next steps
 
- - [Navigating the Dashboard](/docs/v1/deployment/mastra-cloud/dashboard)
+ - [Navigating the Dashboard](/docs/v1/mastra-cloud/dashboard)
@@ -3,7 +3,7 @@ title: "Memory overview | Memory"
 description: "Learn how Mastra's memory system works with working memory, conversation history, and semantic recall."
 ---
 
- # Memory overview
+ # Memory Overview
 
 Memory in Mastra helps agents manage context across conversations by condensing relevant information into the language model's context window.
 
@@ -1,5 +1,5 @@
 ---
- title: "Memory with LibSQL | Memory"
+ title: "Memory with LibSQL | Storage"
 description: Example for how to use Mastra's memory system with LibSQL storage and vector database backend.
 ---
 
@@ -1,5 +1,5 @@
 ---
- title: "Example: Memory with MongoDB | Memory"
+ title: "Example: Memory with MongoDB | Storage"
 description: Example for how to use Mastra's memory system with MongoDB storage and vector capabilities.
 ---
 
@@ -1,5 +1,5 @@
 ---
- title: "Memory with Postgres | Memory"
+ title: "Memory with Postgres | Storage"
 description: Example for how to use Mastra's memory system with PostgreSQL storage and vector capabilities.
 ---
 
@@ -1,5 +1,5 @@
 ---
- title: "Memory with Upstash | Memory"
+ title: "Memory with Upstash | Storage"
 description: Example for how to use Mastra's memory system with Upstash Redis storage and vector capabilities.
 ---
 
@@ -1,5 +1,5 @@
 ---
- title: "MastraStorage | Server & DB"
+ title: "MastraStorage | Storage"
 description: Overview of Mastra's storage system and data persistence capabilities.
 ---
 
@@ -9,7 +9,7 @@ import { StorageOverviewImage } from "@site/src/components/StorageOverviewImage"
 import Tabs from "@theme/Tabs";
 import TabItem from "@theme/TabItem";
 
- # MastraStorage
+ # Storage Overview
 
 `MastraStorage` provides a unified interface for managing:
 
@@ -7,7 +7,7 @@ description: Learn how to use logging in Mastra to monitor execution, capture ap
 
 Mastra's logging system captures function execution, input data, and output responses in a structured format.
 
- When deploying to Mastra Cloud, logs are shown on the [Logs](/docs/v1/deployment/mastra-cloud/observability) page. In self-hosted or custom environments, logs can be directed to files or external services depending on the configured transports.
+ When deploying to Mastra Cloud, logs are shown on the [Logs](/docs/v1/mastra-cloud/observability) page. In self-hosted or custom environments, logs can be directed to files or external services depending on the configured transports.
 
 ## Configuring logs with PinoLogger
 
@@ -112,4 +112,4 @@ CloudExporter uses intelligent batching to optimize network usage. Traces are bu
 
 - [Tracing Overview](/docs/v1/observability/tracing/overview)
 - [DefaultExporter](/docs/v1/observability/tracing/exporters/default)
- - [Mastra Cloud Documentation](/docs/v1/deployment/mastra-cloud/overview)
+ - [Mastra Cloud Documentation](/docs/v1/mastra-cloud/overview)
@@ -160,4 +160,4 @@ new DefaultExporter({
 
 - [Tracing Overview](/docs/v1/observability/tracing/overview)
 - [CloudExporter](/docs/v1/observability/tracing/exporters/cloud)
- - [Storage Configuration](/docs/v1/server-db/storage)
+ - [Storage Configuration](/docs/v1/memory/storage/overview)
@@ -28,7 +28,9 @@ Use `chunk` to split documents into manageable pieces. Mastra supports multiple
 - `latex`: LaTeX structure-aware splitting
 - `sentence`: Sentence-aware splitting
 
- **Note:** Each strategy accepts different parameters optimized for its chunking approach.
+ :::note
+ Each strategy accepts different parameters optimized for its chunking approach.
+ :::
 
 Here's an example of how to use the `recursive` strategy:
 
@@ -67,15 +69,17 @@
 });
 ```
 
- **Note:** Metadata extraction may use LLM calls, so ensure your API key is set.
+ :::note
+ Metadata extraction may use LLM calls, so ensure your API key is set.
+ :::
 
- We go deeper into chunking strategies in our [chunk documentation](/reference/v1/rag/chunk).
+ We go deeper into chunking strategies in our [`chunk()` reference documentation](/reference/v1/rag/chunk).
 
 ## Step 2: Embedding Generation
 
- Transform chunks into embeddings using your preferred provider. Mastra supports embedding models through the model router or AI SDK packages.
+ Transform chunks into embeddings using your preferred provider. Mastra supports embedding models through the model router.
 
- ### Using the Model Router (Recommended)
+ ### Using the Model Router
 
 The simplest way is to use Mastra's model router with `provider/model` strings:
 
@@ -89,27 +93,10 @@
 });
 ```
 
- Supported embedding models:
-
- - **OpenAI**: `text-embedding-3-small`, `text-embedding-3-large`, `text-embedding-ada-002`
- - **Google**: `gemini-embedding-001`, `text-embedding-004`
+ Mastra supports OpenAI and Google embedding models. For a complete list of supported embedding models, see the [embeddings reference](/reference/v1/rag/embeddings).
 
 The model router automatically handles API key detection from environment variables.
 
- ### Using AI SDK Packages
-
- You can also use AI SDK embedding models directly:
-
- ```ts showLineNumbers copy
- import { embedMany } from "ai";
- import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
-
- const { embeddings } = await embedMany({
-   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
-   values: chunks.map((chunk) => chunk.text),
- });
- ```
-
 The embedding functions return vectors, arrays of numbers representing the semantic meaning of your text, ready for similarity searches in your vector database.
 
 ### Configuring Embedding Dimensions
@@ -147,9 +134,9 @@
 });
 ```
 
- ### Vector Database Compatibility
-
+ :::important[Vector Database Compatibility]
 When storing embeddings, the vector database index must be configured to match the output size of your embedding model. If the dimensions do not match, you may get errors or data corruption.
+ :::
 
 ## Example: Complete Pipeline
 
@@ -0,0 +1,220 @@
+ ---
+ title: "GraphRAG | RAG"
+ description: Guide on graph-based retrieval in Mastra's RAG systems for documents with complex relationships.
+ ---
+
+ # GraphRAG
+
+ Graph-based retrieval enhances traditional vector search by following relationships between chunks of information. This approach is useful when information is spread across multiple documents or when documents reference each other.
+
+ ## When to use GraphRAG
+
+ GraphRAG is particularly effective when:
+
+ - Information is spread across multiple documents
+ - Documents reference each other
+ - You need to traverse relationships to find complete answers
+ - Understanding connections between concepts is important
+ - Simple vector similarity misses important contextual relationships
+
+ For straightforward semantic search without relationship traversal, use [standard retrieval methods](/docs/v1/rag/retrieval).
+
+ ## How GraphRAG works
+
+ GraphRAG combines vector similarity with knowledge graph traversal:
+
+ 1. Initial vector search retrieves relevant chunks based on semantic similarity
+ 2. A knowledge graph is constructed from the retrieved chunks
+ 3. The graph is traversed to find connected information
+ 4. Results include both directly relevant chunks and related content
+
+ This process helps surface information that might not be semantically similar to the query but is contextually relevant through connections.
+
+ ## Creating a graph query tool
+
+ The Graph Query Tool provides agents with the ability to perform graph-based retrieval:
+
+ ```ts showLineNumbers copy
+ import { createGraphRAGTool } from "@mastra/rag";
+ import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
+
+ const graphQueryTool = createGraphRAGTool({
+   vectorStoreName: "pgVector",
+   indexName: "embeddings",
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
+   graphOptions: {
+     threshold: 0.7,
+   },
+ });
+ ```
+
+ ### Configuration options
+
+ The `graphOptions` parameter controls how the knowledge graph is built and traversed:
+
+ - `threshold`: Similarity threshold (0-1) for determining which chunks are related. Higher values create sparser graphs with stronger connections; lower values create denser graphs with more potential relationships.
+ - `dimension`: Vector embedding dimension. Must match the embedding model's output dimension (e.g., 1536 for OpenAI's text-embedding-3-small).
+
+ ```ts showLineNumbers copy
+ const graphQueryTool = createGraphRAGTool({
+   vectorStoreName: "pgVector",
+   indexName: "embeddings",
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
+   graphOptions: {
+     dimension: 1536,
+     threshold: 0.7,
+   },
+ });
+ ```
+
+ ## Using GraphRAG with agents
+
+ Integrate the graph query tool with an agent to enable graph-based retrieval:
+
+ ```ts showLineNumbers copy
+ import { Agent } from "@mastra/core/agent";
+
+ const ragAgent = new Agent({
+   id: "rag-agent",
+   name: "GraphRAG Agent",
+   instructions: `You are a helpful assistant that answers questions based on the provided context.
+ When answering questions, use the graph query tool to find relevant information and relationships.
+ Base your answers on the context provided by the tool, and clearly state if the context doesn't contain enough information.`,
+   model: "openai/gpt-5.1",
+   tools: {
+     graphQueryTool,
+   },
+ });
+ ```
+
+ ## Document processing and storage
+
+ Before using graph-based retrieval, process documents into chunks and store their embeddings:
+
+ ```ts showLineNumbers copy
+ import { MDocument } from "@mastra/rag";
+ import { embedMany } from "ai";
+ import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
+
+ // Create and chunk document
+ const doc = MDocument.fromText("Your document content here...");
+
+ const chunks = await doc.chunk({
+   strategy: "recursive",
+   size: 512,
+   overlap: 50,
+   separator: "\n",
+ });
+
+ // Generate embeddings
+ const { embeddings } = await embedMany({
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
+   values: chunks.map((chunk) => chunk.text),
+ });
+
+ // Store in vector database
+ const vectorStore = mastra.getVector("pgVector");
+ await vectorStore.createIndex({
+   indexName: "embeddings",
+   dimension: 1536,
+ });
+ await vectorStore.upsert({
+   indexName: "embeddings",
+   vectors: embeddings,
+   metadata: chunks?.map((chunk) => ({ text: chunk.text })),
+ });
+ ```
+
+ ## Querying with GraphRAG
+
+ Once configured, the agent can perform graph-based queries:
+
+ ```ts showLineNumbers copy
+ const query = "What are the effects of infrastructure changes on local businesses?";
+ const response = await ragAgent.generate(query);
+ console.log(response.text);
+ ```
+
+ The agent uses the graph query tool to:
+
+ 1. Convert the query to an embedding
+ 2. Find semantically similar chunks in the vector store
+ 3. Build a knowledge graph from related chunks
+ 4. Traverse the graph to find connected information
+ 5. Return comprehensive context for generating the response
+
+ ## Choosing the right threshold
+
+ The threshold parameter significantly impacts retrieval quality:
+
+ - **High threshold (0.8-0.9)**: Strict connections, fewer relationships, more precise but potentially incomplete results
+ - **Medium threshold (0.6-0.8)**: Balanced approach, good for most use cases
+ - **Low threshold (0.4-0.6)**: More connections, broader context, risk of including less relevant information
+
+ Start with 0.7 and adjust based on your specific use case:
+
+ ```ts showLineNumbers copy
+ // Strict connections for precise answers
+ const strictGraphTool = createGraphRAGTool({
+   vectorStoreName: "pgVector",
+   indexName: "embeddings",
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
+   graphOptions: {
+     threshold: 0.85,
+   },
+ });
+
+ // Broader connections for exploratory queries
+ const broadGraphTool = createGraphRAGTool({
+   vectorStoreName: "pgVector",
+   indexName: "embeddings",
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
+   graphOptions: {
+     threshold: 0.5,
+   },
+ });
+ ```
+
+ ## Combining with other retrieval methods
+
+ GraphRAG can be used alongside other retrieval approaches:
+
+ ```ts showLineNumbers copy
+ import { createVectorQueryTool } from "@mastra/rag";
+
+ const vectorQueryTool = createVectorQueryTool({
+   vectorStoreName: "pgVector",
+   indexName: "embeddings",
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
+ });
+
+ const graphQueryTool = createGraphRAGTool({
+   vectorStoreName: "pgVector",
+   indexName: "embeddings",
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
+   graphOptions: {
+     threshold: 0.7,
+   },
+ });
+
+ const agent = new Agent({
+   id: "rag-agent",
+   name: "RAG Agent",
+   instructions: `Use vector search for simple fact-finding queries.
+ Use graph search when you need to understand relationships or find connected information.`,
+   model: "openai/gpt-5.1",
+   tools: {
+     vectorQueryTool,
+     graphQueryTool,
+   },
+ });
+ ```
+
+ This gives the agent flexibility to choose the appropriate retrieval method based on the query.
+
+ ## Reference
+
+ For detailed API documentation, see:
+
+ - [GraphRAG Class](/reference/v1/rag/graph-rag)
+ - [createGraphRAGTool()](/reference/v1/tools/graph-rag-tool)
@@ -74,5 +74,4 @@ Mastra supports multiple vector stores for embedding persistence and similarity
 
 ## More resources
 
- - [Chain of Thought RAG Example](/examples/v1/rag/usage/cot-rag)
- - [All RAG Examples](/examples/v1/) (including different chunking strategies, embedding models, and vector stores)
+ - [Chain of Thought RAG Example](https://github.com/mastra-ai/mastra/tree/main/examples/basics/rag/cot-rag)
@@ -76,7 +76,9 @@ Results include both the text content and a similarity score:
 
 ### Metadata Filtering
 
- Filter results based on metadata fields to narrow down the search space. This is useful when you have documents from different sources, time periods, or with specific attributes. Mastra provides a unified MongoDB-style query syntax that works across all supported vector stores.
+ Filter results based on metadata fields to narrow down the search space. This approach - combining vector similarity search with metadata filters - is sometimes called hybrid vector search, as it merges semantic search with structured filtering criteria.
+
+ This is useful when you have documents from different sources, time periods, or with specific attributes. Mastra provides a unified MongoDB-style query syntax that works across all supported vector stores.
 
 For detailed information about available operators and syntax, see the [Metadata Filters Reference](/reference/v1/rag/metadata-filters).
 
@@ -146,13 +148,14 @@ Common use cases for metadata filtering:
 - Combine multiple conditions for precise querying
 - Filter by document attributes (e.g., language, author)
 
- For an example of how to use metadata filtering, see the [Hybrid Vector Search](/examples/v1/rag/query/hybrid-vector-search) example.
-
 ### Vector Query Tool
 
 Sometimes you want to give your agent the ability to query a vector database directly. The Vector Query Tool allows your agent to be in charge of retrieval decisions, combining semantic search with optional filtering and reranking based on the agent's understanding of the user's needs.
 
 ```ts showLineNumbers copy
+ import { createVectorQueryTool } from "@mastra/rag";
+ import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
+
 const vectorQueryTool = createVectorQueryTool({
   vectorStoreName: "pgVector",
   indexName: "embeddings",
@@ -180,6 +183,9 @@ Connection credentials (URLs, auth tokens) are configured when you instantiate t
 :::
 
 ```ts showLineNumbers copy
+ import { createVectorQueryTool } from "@mastra/rag";
+ import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
+
 // Pinecone with namespace
 const pineconeQueryTool = createVectorQueryTool({
   vectorStoreName: "pinecone",
@@ -535,8 +541,9 @@ The weights control how different factors influence the final ranking:
 - `vector`: Higher values favor the original vector similarity scores
 - `position`: Higher values help maintain the original ordering of results
 
-
- > **Note:** For semantic scoring to work properly during re-ranking, each result must include the text content in its `metadata.text` field.
+ :::note
+ For semantic scoring to work properly during re-ranking, each result must include the text content in its `metadata.text` field.
+ :::
 
 You can also use other relevance score providers like Cohere or ZeroEntropy:
 
@@ -552,27 +559,4 @@ The re-ranked results combine vector similarity with semantic understanding to i
 
 For more details about re-ranking, see the [rerank()](/reference/v1/rag/rerankWithScorer) method.
 
- ### Graph-based Retrieval
-
- For documents with complex relationships, graph-based retrieval can follow connections between chunks. This helps when:
-
- - Information is spread across multiple documents
- - Documents reference each other
- - You need to traverse relationships to find complete answers
-
- Example setup:
-
- ```ts showLineNumbers copy
- const graphQueryTool = createGraphQueryTool({
-   vectorStoreName: "pgVector",
-   indexName: "embeddings",
-   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
-   graphOptions: {
-     threshold: 0.7,
-   },
- });
- ```
-
- For more details about graph-based retrieval, see the [GraphRAG](/reference/v1/rag/graph-rag) class and the [createGraphQueryTool()](/reference/v1/tools/graph-rag-tool) function.
-
- For an example of how to use the graph-based retrieval method, see the [Graph-based Retrieval](/examples/v1/rag/usage/graph-rag) example.
+ For graph-based retrieval that follows connections between chunks, see the [GraphRAG](/docs/v1/rag/graph-rag) documentation.
@@ -380,9 +380,11 @@ The dimension size must match the output dimension of your chosen embedding mode
 
 - OpenAI text-embedding-3-small: 1536 dimensions (or custom, e.g., 256)
 - Cohere embed-multilingual-v3: 1024 dimensions
- - Google `text-embedding-004`: 768 dimensions (or custom)
+ - Google text-embedding-004: 768 dimensions (or custom)
 
- > **Important**: Index dimensions cannot be changed after creation. To use a different model, delete and recreate the index with the new dimension size.
+ :::important
+ Index dimensions cannot be changed after creation. To use a different model, delete and recreate the index with the new dimension size.
+ :::
 
 ### Naming Rules for Databases
 
@@ -535,7 +537,9 @@ The upsert operation:
 
 Vector stores support rich metadata (any JSON-serializable fields) for filtering and organization. Since metadata is stored with no fixed schema, use consistent field naming to avoid unexpected query results.
 
- **Important**: Metadata is crucial for vector storage - without it, you'd only have numerical embeddings with no way to return the original text or filter results. Always store at least the source text as metadata.
+ :::important
+ Metadata is crucial for vector storage - without it, you'd only have numerical embeddings with no way to return the original text or filter results. Always store at least the source text as metadata.
+ :::
 
 ```ts showLineNumbers copy
 // Store embeddings with rich metadata for better organization and filtering
@@ -212,17 +212,24 @@ export const agent = new Agent({
 },
 {
   name: "inputProcessors",
-   type: "Processor[] | ({ requestContext: RequestContext }) => Processor[] | Promise<Processor[]>",
+   type: "(Processor | ProcessorWorkflow)[] | ({ requestContext: RequestContext }) => (Processor | ProcessorWorkflow)[] | Promise<(Processor | ProcessorWorkflow)[]>",
   isOptional: true,
   description:
-     "Input processors that can modify or validate messages before they are processed by the agent. Must implement the `processInput` function.",
+     "Input processors that can modify or validate messages before they are processed by the agent. Can be individual Processor objects or workflows created with `createWorkflow()` using ProcessorStepSchema.",
 },
 {
   name: "outputProcessors",
-   type: "Processor[] | ({ requestContext: RequestContext }) => Processor[] | Promise<Processor[]>",
+   type: "(Processor | ProcessorWorkflow)[] | ({ requestContext: RequestContext }) => (Processor | ProcessorWorkflow)[] | Promise<(Processor | ProcessorWorkflow)[]>",
   isOptional: true,
   description:
-     "Output processors that can modify or validate messages from the agent, before it is sent to the client. Must implement either (or both) of the `processOutputResult` and `processOutputStream` functions.",
+     "Output processors that can modify or validate messages from the agent before they are sent to the client. Can be individual Processor objects or workflows.",
+ },
+ {
+   name: "maxProcessorRetries",
+   type: "number",
+   isOptional: true,
+   description:
+     "Maximum number of times a processor can request retrying the LLM step.",
 },
 ]}
 />