@mastra/mcp-docs-server 0.13.1-alpha.0 → 0.13.1-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +15 -15
  2. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +44 -44
  3. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +15 -15
  4. package/.docs/organized/changelogs/%40mastra%2Fcore.md +37 -37
  5. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +53 -53
  6. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +53 -53
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +54 -54
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +63 -63
  9. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +40 -0
  10. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +19 -19
  11. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +24 -24
  12. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +8 -8
  13. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +13 -13
  14. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +14 -14
  15. package/.docs/organized/changelogs/%40mastra%2Fpg.md +31 -31
  16. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +50 -50
  17. package/.docs/organized/changelogs/%40mastra%2Fserver.md +47 -47
  18. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +37 -37
  19. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +9 -0
  20. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +12 -12
  21. package/.docs/organized/changelogs/create-mastra.md +15 -15
  22. package/.docs/organized/changelogs/mastra.md +86 -86
  23. package/.docs/organized/code-examples/agent.md +1 -1
  24. package/.docs/organized/code-examples/agui.md +4 -1
  25. package/.docs/organized/code-examples/ai-sdk-useChat.md +1 -1
  26. package/.docs/organized/code-examples/fireworks-r1.md +1 -1
  27. package/.docs/organized/code-examples/memory-with-processors.md +2 -2
  28. package/.docs/organized/code-examples/openapi-spec-writer.md +1 -1
  29. package/.docs/organized/code-examples/quick-start.md +1 -1
  30. package/.docs/organized/code-examples/weather-agent.md +7 -1
  31. package/.docs/raw/course/01-first-agent/03-verifying-installation.md +4 -2
  32. package/.docs/raw/course/01-first-agent/16-adding-memory-to-agent.md +1 -1
  33. package/.docs/raw/course/02-agent-tools-mcp/15-updating-mcp-config-github.md +1 -1
  34. package/.docs/raw/course/02-agent-tools-mcp/20-updating-mcp-config-hackernews.md +1 -1
  35. package/.docs/raw/course/02-agent-tools-mcp/26-updating-mcp-config-filesystem.md +1 -1
  36. package/.docs/raw/course/03-agent-memory/03-installing-memory.md +4 -2
  37. package/.docs/raw/course/03-agent-memory/04-creating-basic-memory-agent.md +1 -1
  38. package/.docs/raw/course/03-agent-memory/08-configuring-conversation-history.md +3 -3
  39. package/.docs/raw/course/03-agent-memory/13-vector-store-configuration.md +27 -0
  40. package/.docs/raw/course/03-agent-memory/{13-what-is-semantic-recall.md → 14-what-is-semantic-recall.md} +1 -1
  41. package/.docs/raw/course/03-agent-memory/16-configuring-semantic-recall.md +41 -0
  42. package/.docs/raw/course/03-agent-memory/18-advanced-configuration-semantic-recall.md +28 -0
  43. package/.docs/raw/course/03-agent-memory/21-configuring-working-memory.md +9 -9
  44. package/.docs/raw/course/03-agent-memory/22-custom-working-memory-templates.md +10 -2
  45. package/.docs/raw/course/03-agent-memory/25-combining-memory-features.md +8 -1
  46. package/.docs/raw/course/03-agent-memory/27-creating-learning-assistant.md +8 -1
  47. package/.docs/raw/deployment/deployment.mdx +26 -97
  48. package/.docs/raw/deployment/overview.mdx +18 -3
  49. package/.docs/raw/deployment/web-framework.mdx +63 -0
  50. package/.docs/raw/frameworks/web-frameworks/astro.mdx +7 -1
  51. package/.docs/raw/frameworks/web-frameworks/next-js.mdx +1 -1
  52. package/.docs/raw/getting-started/installation.mdx +98 -558
  53. package/.docs/raw/getting-started/project-structure.mdx +3 -16
  54. package/.docs/raw/rag/vector-databases.mdx +4 -7
  55. package/.docs/raw/reference/agents/generate.mdx +35 -3
  56. package/.docs/raw/reference/agents/stream.mdx +35 -3
  57. package/.docs/raw/reference/client-js/memory.mdx +14 -0
  58. package/.docs/raw/reference/client-js/workflows.mdx +28 -0
  59. package/.docs/raw/reference/deployer/cloudflare.mdx +9 -3
  60. package/.docs/raw/reference/deployer/deployer.mdx +1 -1
  61. package/.docs/raw/reference/deployer/netlify.mdx +25 -4
  62. package/.docs/raw/reference/deployer/vercel.mdx +10 -4
  63. package/.docs/raw/workflows/control-flow.mdx +45 -171
  64. package/.docs/raw/workflows/input-data-mapping.mdx +21 -88
  65. package/.docs/raw/workflows/overview.mdx +23 -46
  66. package/.docs/raw/workflows/suspend-and-resume.mdx +46 -34
  67. package/.docs/raw/workflows/using-with-agents-and-tools.mdx +55 -191
  68. package/dist/_tsup-dts-rollup.d.ts +14 -0
  69. package/dist/{chunk-QWIXFGFR.js → chunk-P5AHYMUI.js} +126 -20
  70. package/dist/prepare-docs/prepare.js +1 -1
  71. package/dist/stdio.js +42 -12
  72. package/package.json +3 -3
  73. package/.docs/raw/course/03-agent-memory/15-configuring-semantic-recall.md +0 -46
  74. package/.docs/raw/course/03-agent-memory/16-vector-store-configuration.md +0 -37
  75. package/.docs/raw/course/03-agent-memory/18-disabling-semantic-recall.md +0 -24
  76. /package/.docs/raw/{deployment/client.mdx → client-js/overview.mdx} +0 -0
  77. /package/.docs/raw/course/03-agent-memory/{14-how-semantic-recall-works.md → 15-how-semantic-recall-works.md} +0 -0
package/.docs/raw/course/03-agent-memory/18-advanced-configuration-semantic-recall.md
@@ -0,0 +1,28 @@
+ # Advanced Configuration of Semantic Recall
+
+ We can configure semantic recall in more detail by setting additional options on `semanticRecall`:
+
+ ```typescript
+ const memory = new Memory({
+   storage: new LibSQLStore({
+     url: "file:../../memory.db", // relative path from the `.mastra/output` directory
+   }),
+   vector: new LibSQLVector({
+     connectionUrl: "file:../../vector.db", // relative path from the `.mastra/output` directory
+   }),
+   embedder: openai.embedding("text-embedding-3-small"),
+   options: {
+     semanticRecall: {
+       topK: 3,
+       messageRange: {
+         before: 2,
+         after: 1,
+       },
+     },
+   },
+ });
+ ```
+
+ The `topK` parameter controls how many semantically similar messages are retrieved. A higher value will retrieve more messages, which can be helpful for complex topics but may also include less relevant information. The default value is `2`.
+
+ The `messageRange` parameter controls how much context is included with each match. This is important because the matching message alone might not provide enough context to understand the conversation. Including messages before and after the match helps the agent understand the context of the matched message.
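For orientation, the sketch below (not part of the diff) shows how a memory instance configured as in the lesson above is typically attached to an agent. The `Memory`, `LibSQLStore`, `LibSQLVector`, and embedder calls mirror the hunk; the agent name, instructions, and model are illustrative assumptions.

```typescript
import { openai } from "@ai-sdk/openai";
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";
import { LibSQLStore, LibSQLVector } from "@mastra/libsql";

// Semantic recall tuned as in the lesson: return the top 3 matches,
// each padded with 2 messages before and 1 message after the hit.
const memory = new Memory({
  storage: new LibSQLStore({ url: "file:../../memory.db" }),
  vector: new LibSQLVector({ connectionUrl: "file:../../vector.db" }),
  embedder: openai.embedding("text-embedding-3-small"),
  options: {
    semanticRecall: {
      topK: 3,
      messageRange: { before: 2, after: 1 },
    },
  },
});

// Illustrative wiring only; name, instructions, and model are placeholders.
export const memoryAgent = new Agent({
  name: "MemoryAgent",
  instructions: "You are a helpful assistant that remembers earlier conversations.",
  model: openai("gpt-4o-mini"),
  memory,
});
```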
package/.docs/raw/course/03-agent-memory/21-configuring-working-memory.md
@@ -6,11 +6,18 @@ Let's update our agent with working memory capabilities:
  import { Agent } from "@mastra/core/agent";
  import { Memory } from "@mastra/memory";
  import { openai } from "@ai-sdk/openai";
+ import { LibSQLStore, LibSQLVector } from "@mastra/libsql";

  // Create a memory instance with working memory configuration
  const memory = new Memory({
+   storage: new LibSQLStore({
+     url: "file:../../memory.db", // relative path from the `.mastra/output` directory
+   }), // Storage for message history
+   vector: new LibSQLVector({
+     connectionUrl: "file:../../vector.db", // relative path from the `.mastra/output` directory
+   }), // Vector database for semantic search
+   embedder: openai.embedding("text-embedding-3-small"), // Embedder for message embeddings
    options: {
-     lastMessages: 20,
      semanticRecall: {
        topK: 3,
        messageRange: {
@@ -20,7 +27,6 @@ const memory = new Memory({
      },
      workingMemory: {
        enabled: true,
-       use: "tool-call", // Recommended setting
      },
    },
  });
@@ -52,12 +58,6 @@ export const memoryAgent = new Agent({
  The `workingMemory` configuration has several important options:

  - `enabled`: Whether working memory is enabled
- - `use`: How the agent interacts with working memory (recommended setting is "tool-call")
-
- The `use` option can be set to:
-
- - `"tool-call"`: The agent updates working memory via tool calls (recommended)
- - `"direct"`: The agent directly edits the working memory text
- - `"read-only"`: The agent can read but not update working memory
+ - `template`: A template for the working memory content

  The instructions for the agent are also important. They guide the agent on what information to store in working memory and how to use that information when responding to the user.
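To make the end state concrete, here is a minimal sketch (not part of the diff) of the working memory options after this change, reusing the LibSQL setup from the surrounding lessons; the template content is a placeholder.

```typescript
import { openai } from "@ai-sdk/openai";
import { Memory } from "@mastra/memory";
import { LibSQLStore, LibSQLVector } from "@mastra/libsql";

// Working memory now exposes `enabled` and an optional `template`;
// the former `use` mode is gone.
const memory = new Memory({
  storage: new LibSQLStore({ url: "file:../../memory.db" }),
  vector: new LibSQLVector({ connectionUrl: "file:../../vector.db" }),
  embedder: openai.embedding("text-embedding-3-small"),
  options: {
    workingMemory: {
      enabled: true,
      // Placeholder template; shape it around whatever the agent should track.
      template: `
# User Profile
- Name:
- Interests:
`,
    },
  },
});
```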
package/.docs/raw/course/03-agent-memory/22-custom-working-memory-templates.md
@@ -11,8 +11,14 @@ import { openai } from "@ai-sdk/openai";

  // Create a memory instance with a custom working memory template
  const memory = new Memory({
+   storage: new LibSQLStore({
+     url: "file:../../memory.db", // relative path from the `.mastra/output` directory
+   }), // Storage for message history
+   vector: new LibSQLVector({
+     connectionUrl: "file:../../vector.db", // relative path from the `.mastra/output` directory
+   }), // Vector database for semantic search
+   embedder: openai.embedding("text-embedding-3-small"), // Embedder for message embeddings
    options: {
-     lastMessages: 20,
      semanticRecall: {
        topK: 3,
        messageRange: {
@@ -22,7 +28,9 @@ const memory = new Memory({
      },
      workingMemory: {
        enabled: true,
-       use: "tool-call",
+     },
+     workingMemory: {
+       enabled: true,
        template: `
  # User Profile

package/.docs/raw/course/03-agent-memory/25-combining-memory-features.md
@@ -11,9 +11,17 @@ Let's create a comprehensive agent that utilizes conversation history, semantic
  import { Agent } from "@mastra/core/agent";
  import { Memory } from "@mastra/memory";
  import { openai } from "@ai-sdk/openai";
+ import { LibSQLStore, LibSQLVector } from "@mastra/libsql";

  // Create a comprehensive memory configuration
  const memory = new Memory({
+   storage: new LibSQLStore({
+     url: "file:../../memory.db", // relative path from the `.mastra/output` directory
+   }),
+   vector: new LibSQLVector({
+     connectionUrl: "file:../../vector.db", // relative path from the `.mastra/output` directory
+   }),
+   embedder: openai.embedding("text-embedding-3-small"),
    options: {
      // Conversation history configuration
      lastMessages: 20, // Include the last 20 messages in the context
@@ -30,7 +38,6 @@ const memory = new Memory({
      // Working memory configuration
      workingMemory: {
        enabled: true,
-       use: "tool-call",
        template: `
  # User Profile

package/.docs/raw/course/03-agent-memory/27-creating-learning-assistant.md
@@ -7,9 +7,17 @@ Let's create a practical example of a memory-enhanced agent: a Personal Learning
  import { Agent } from "@mastra/core/agent";
  import { Memory } from "@mastra/memory";
  import { openai } from "@ai-sdk/openai";
+ import { LibSQLStore, LibSQLVector } from "@mastra/libsql";

  // Create a specialized memory configuration for the learning assistant
  const learningMemory = new Memory({
+   storage: new LibSQLStore({
+     url: "file:../../memory.db", // relative path from the `.mastra/output` directory
+   }),
+   vector: new LibSQLVector({
+     connectionUrl: "file:../../vector.db", // relative path from the `.mastra/output` directory
+   }),
+   embedder: openai.embedding("text-embedding-3-small"),
    options: {
      lastMessages: 20,
      semanticRecall: {
@@ -21,7 +29,6 @@ const learningMemory = new Memory({
      },
      workingMemory: {
        enabled: true,
-       use: "tool-call",
        template: `
  # Learner Profile

package/.docs/raw/deployment/deployment.mdx
@@ -5,7 +5,9 @@ description: "Build and deploy Mastra applications using platform-specific deplo

  # Serverless Deployment

- This guide covers deploying Mastra to Cloudflare Workers, Vercel, and Netlify using platform-specific deployers
+ This guide covers deploying standalone Mastra applications to Cloudflare Workers, Vercel, and Netlify using platform-specific deployers.
+
+ Deployers **aren't** required when integrating Mastra with a framework. See [Web Framework Integration](/docs/deployment/web-framework) for more information.

  For self-hosted Node.js server deployment, see the [Creating A Mastra Server](/docs/deployment/server) guide.

@@ -18,112 +20,39 @@ Before you begin, ensure you have:
  - An account with your chosen platform
  - Required API keys or credentials

- ## Serverless Platform Deployers
-
- Platform-specific deployers handle configuration and deployment for:
-
- - **[Cloudflare Workers](/reference/deployer/cloudflare)**
- - **[Vercel](/reference/deployer/vercel)**
- - **[Netlify](/reference/deployer/netlify)**
- - **[Mastra Cloud](/docs/mastra-cloud/overview)** _(beta)_. You can join the [cloud waitlist](https://mastra.ai/cloud-beta) for early access.
-
- ### Installing Deployers
+ ## LibSQLStore

- ```bash copy
- # For Cloudflare
- npm install @mastra/deployer-cloudflare@latest
+ `LibSQLStore` writes to the local filesystem, which is not supported in serverless environments due to their ephemeral nature. If you're deploying to a platform like Vercel, Netlify or Cloudflare, you **must remove** all usage of `LibSQLStore`.

- # For Vercel
- npm install @mastra/deployer-vercel@latest
+ Specifically, ensure you've removed it from both `src/mastra/index.ts` and `src/mastra/agents/weather-agent.ts`:

- # For Netlify
- npm install @mastra/deployer-netlify@latest
- ```
-
- ### Configuring Deployers
-
- Configure the deployer in your entry file:
-
- ```typescript copy showLineNumbers
- import { Mastra } from "@mastra/core";
- import { PinoLogger } from "@mastra/loggers";
- import { CloudflareDeployer } from "@mastra/deployer-cloudflare";

+ ```diff filename="src/mastra/index.ts" showLineNumbers
  export const mastra = new Mastra({
-   agents: {
-     /* your agents here */
-   },
-   logger: new PinoLogger({ name: "MyApp", level: "debug" }),
-   deployer: new CloudflareDeployer({
-     scope: "your-cloudflare-scope",
-     projectName: "your-project-name",
-     // See complete configuration options in the reference docs
-   }),
+   // ...
+ - storage: new LibSQLStore({
+ -   // stores telemetry, evals, ... into memory storage, if it needs to persist, change to file:../mastra.db
+ -   url: ":memory:",
+ - })
  });
  ```

- ### Deployer Configuration
-
- Each deployer has specific configuration options. Below are basic examples, but refer to the reference documentation for complete details.
-
- #### Cloudflare Deployer
-
- ```typescript copy showLineNumbers
- new CloudflareDeployer({
-   scope: "your-cloudflare-account-id",
-   projectName: "your-project-name",
-   // For complete configuration options, see the reference documentation
+ ``` diff filename="src/mastra/agents/weather-agent.ts" showLineNumbers
+ export const weatherAgent = new Agent({
+   // ..
+ - memory: new Memory({
+ -   storage: new LibSQLStore({
+ -     url: "file:../mastra.db" // path is relative to the .mastra/output directory
+ -   })
+ - })
  });
  ```

- [View Cloudflare Deployer Reference →](/reference/deployer/cloudflare)
-
- #### Vercel Deployer
-
- ```typescript copy showLineNumbers
- new VercelDeployer({
-   teamSlug: "your-vercel-team-slug",
-   projectName: "your-project-name",
-   token: "your-vercel-token",
-   // For complete configuration options, see the reference documentation
- });
- ```
-
- [View Vercel Deployer Reference →](/reference/deployer/vercel)
-
- #### Netlify Deployer
-
- ```typescript copy showLineNumbers
- new NetlifyDeployer({
-   scope: "your-netlify-team-slug",
-   projectName: "your-project-name",
-   token: "your-netlify-token",
- });
- ```
-
- [View Netlify Deployer Reference →](/reference/deployer/netlify)
-
- ## Environment Variables
-
- Required variables:
-
- 1. Platform deployer variables (if using platform deployers):
-    - Platform credentials
- 2. Agent API keys:
-    - `OPENAI_API_KEY`
-    - `ANTHROPIC_API_KEY`
- 3. Server configuration (for universal deployment):
-    - `PORT`: HTTP server port (default: 3000)
-    - `HOST`: Server host (default: 0.0.0.0)
-
- ## Build Mastra Project
-
- To build your Mastra project for your target platform run:
+ ## Serverless Platform Deployers

- ```bash
- npx mastra build
- ```
+ Platform-specific deployers handle configuration and deployment for:

- When a Deployer is used, the build output is automatically prepared for the target platform.
- You can then deploy the build output `.mastra/output` via your platform's (Vercel, netlify, cloudfare e.t.c)
- CLI/UI.
+ - **[Cloudflare Workers](/reference/deployer/cloudflare)**
+ - **[Vercel](/reference/deployer/vercel)**
+ - **[Netlify](/reference/deployer/netlify)**
+ - **[Mastra Cloud](/docs/mastra-cloud/overview)** _(beta)_. You can join the [cloud waitlist](https://mastra.ai/cloud-beta) for early access.
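For reference, the deployer wiring that this page now delegates to the reference docs looks roughly like the sketch below; the constructor options are taken from the examples removed in this hunk, and the scope and project name are placeholders.

```typescript
import { Mastra } from "@mastra/core";
import { CloudflareDeployer } from "@mastra/deployer-cloudflare";

// Deployer wiring as shown in the previous revision of this page;
// swap in VercelDeployer or NetlifyDeployer for those platforms.
export const mastra = new Mastra({
  agents: {
    /* your agents here */
  },
  deployer: new CloudflareDeployer({
    scope: "your-cloudflare-account-id",
    projectName: "your-project-name",
  }),
});
```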
package/.docs/raw/deployment/overview.mdx
@@ -5,7 +5,7 @@ description: Learn about different deployment options for your Mastra applicatio

  # Deployment Overview

- Mastra offers multiple deployment options to suit your application's needs, from fully-managed solutions to self-hosted options. This guide will help you understand the available deployment paths and choose the right one for your project.
+ Mastra offers multiple deployment options to suit your application's needs, from fully-managed solutions to self-hosted options, and web framework integrations. This guide will help you understand the available deployment paths and choose the right one for your project.

  ## Deployment Options

@@ -21,6 +21,18 @@ Mastra Cloud is a deployment platform that connects to your GitHub repository, a

  [View Mastra Cloud documentation →](/docs/mastra-cloud/overview)

+
+ ### With a Web Framework
+
+ Mastra can be integrated with a variety of web frameworks. For example, see one of the following for a detailed guide.
+
+ - [With Next.js](/docs/frameworks/web-frameworks/next-js)
+ - [With Astro](/docs/frameworks/web-frameworks/astro)
+
+ When integrated with a framework, Mastra typically requires no additional configuration for deployment.
+
+ [View Web Framework Integration →](/docs/deployment/web-framework)
+

  ### With a Server

  You can deploy Mastra as a standard Node.js HTTP server, which gives you full control over your infrastructure and deployment environment.
@@ -56,8 +68,11 @@ Once your Mastra application is deployed, you'll need to configure your client t

  ## Choosing a Deployment Option

- | Option | Best For | Key Benefits |
+ | Option | Best For | Key Benefits |
  | ------------------------ | ------------------------------------------------------------- | -------------------------------------------------------------- |
  | **Mastra Cloud** | Teams wanting to ship quickly without infrastructure concerns | Fully-managed, automatic scaling, built-in observability |
+ | **Framework Deployment** | Teams already using Next.js, Astro etc | Simplify deployment with a unified codebase for frontend and backend |
  | **Server Deployment** | Teams needing maximum control and customization | Full control, custom middleware, integrate with existing apps |
- | **Serverless Platforms** | Teams already using Vercel, Netlify, or Cloudflare | Platform integration, simplified deployment, automatic scaling |
+ | **Serverless Platforms** | Teams already using Vercel, Netlify, or Cloudflare | Platform integration, simplified deployment, automatic scaling |
+
+
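The hunk's context mentions configuring a client against the deployed Mastra server. A rough sketch with `@mastra/client-js` follows; the base URL and agent id are assumptions, and the exact method names should be confirmed against the client-js reference included in this package.

```typescript
import { MastraClient } from "@mastra/client-js";

// Point the client at wherever the Mastra server was deployed (assumed URL).
const client = new MastraClient({
  baseUrl: "https://your-mastra-deployment.example.com",
});

// Assumed agent id; use the id registered on your Mastra instance.
const agent = client.getAgent("weatherAgent");
const response = await agent.generate({
  messages: [{ role: "user", content: "What's the weather in Berlin?" }],
});
```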
package/.docs/raw/deployment/web-framework.mdx
@@ -0,0 +1,63 @@
+ ---
+ title: "Deploying Mastra with a Web Framework"
+ description: "Learn how Mastra can be deployed when integrated with a Web Framework"
+ ---
+
+ # Web Framework Integration
+
+ This guide covers deploying integrated Mastra applications. Mastra can be integrated with a variety of web frameworks; see one of the following for a detailed guide.
+
+ - [With Next.js](/docs/frameworks/web-frameworks/next-js)
+ - [With Astro](/docs/frameworks/web-frameworks/astro)
+
+ When integrated with a framework, Mastra typically requires no additional configuration for deployment.
+
+ ## With Next.js on Vercel
+
+ If you've integrated Mastra with Next.js [by following our guide](/docs/frameworks/web-frameworks/next-js) and plan to deploy to Vercel, no additional setup is required.
+
+ The only thing to verify is that you've added the following to your `next.config.ts` and removed any usage of [LibSQLStore](/docs/deployment/deployment#libsqlstore), which is not supported in serverless environments:
+
+ ```typescript {4} filename="next.config.ts" showLineNumbers copy
+ import type { NextConfig } from "next";
+
+ const nextConfig: NextConfig = {
+   serverExternalPackages: ["@mastra/*"],
+ };
+
+ export default nextConfig;
+ ```
+
+ ## With Astro on Vercel
+
+ If you've integrated Mastra with Astro [by following our guide](/docs/frameworks/web-frameworks/astro) and plan to deploy to Vercel, no additional setup is required.
+
+ The only thing to verify is that you've added the following to your `astro.config.mjs` and removed any usage of [LibSQLStore](/docs/deployment/deployment#libsqlstore), which is not supported in serverless environments:
+
+ ```javascript {2,6,7} filename="astro.config.mjs" showLineNumbers copy
+ import { defineConfig } from 'astro/config';
+ import vercel from '@astrojs/vercel';
+
+ export default defineConfig({
+   // ...
+   adapter: vercel(),
+   output: "server"
+ });
+ ```
+
+ ## With Astro on Netlify
+
+ If you've integrated Mastra with Astro [by following our guide](/docs/frameworks/web-frameworks/astro) and plan to deploy to Netlify, no additional setup is required.
+
+ The only thing to verify is that you've added the following to your `astro.config.mjs` and removed any usage of [LibSQLStore](/docs/deployment/deployment#libsqlstore), which is not supported in serverless environments:
+
+ ```javascript {2,6,7} filename="astro.config.mjs" showLineNumbers copy
+ import { defineConfig } from 'astro/config';
+ import netlify from '@astrojs/netlify';
+
+ export default defineConfig({
+   // ...
+   adapter: netlify(),
+   output: "server"
+ });
+ ```
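As a concrete picture of why no extra deployment configuration is needed once the framework integration is in place, here is a hypothetical Next.js route handler that calls the Mastra instance directly; the route path, import alias, and agent id are assumptions, not part of this diff.

```typescript
// app/api/chat/route.ts (illustrative path)
import { mastra } from "@/mastra";

export async function POST(req: Request) {
  const { message } = await req.json();

  // The agent id must match one registered on the Mastra instance.
  const agent = mastra.getAgent("weatherAgent");
  const result = await agent.generate(message);

  return Response.json({ text: result.text });
}
```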
package/.docs/raw/frameworks/web-frameworks/astro.mdx
@@ -109,10 +109,11 @@ OPENAI_API_KEY=<your-api-key>

  ## Update .gitignore

- Add `.mastra` to your `.gitignore` file:
+ Add `.mastra` and `.vercel` to your `.gitignore` file:

  ```bash filename=".gitignore" copy
  .mastra
+ .vercel
  ```

  ## Update the Mastra Agent
@@ -365,6 +366,7 @@ Add `.mastra` to your `.gitignore` file:

  ```bash filename=".gitignore" copy
  .mastra
+ .vercel
  ```

  ## Update the Mastra Agent
@@ -510,3 +512,7 @@ Let me know if you need more information!
  </Steps>
  </Tabs.Tab>
  </Tabs>
+
+ ## Next Steps
+
+ - [Deployment | With Astro on Vercel](/docs/deployment/web-framework#with-astro-on-vercel)
package/.docs/raw/frameworks/web-frameworks/next-js.mdx
@@ -491,4 +491,4 @@ Let me know if you need more information!

  ## Next Steps

- - [Serverless Deployment](/docs/deployment/deployment)
+ - [Deployment | With Next.js on Vercel](/docs/deployment/web-framework#with-nextjs-on-vercel)