@mastra/mcp-docs-server 0.13.5-alpha.0 → 0.13.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +76 -76
  2. package/.docs/organized/changelogs/%40mastra%2Fagui.md +61 -61
  3. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +54 -54
  4. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +211 -211
  5. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +75 -75
  6. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +91 -91
  7. package/.docs/organized/changelogs/%40mastra%2Fcore.md +185 -185
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +237 -237
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +143 -143
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +143 -143
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +235 -235
  12. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +84 -84
  13. package/.docs/organized/changelogs/%40mastra%2Fevals.md +56 -56
  14. package/.docs/organized/changelogs/%40mastra%2Ffastembed.md +7 -0
  15. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +75 -75
  16. package/.docs/organized/changelogs/%40mastra%2Flance.md +55 -0
  17. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +56 -56
  18. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +25 -25
  19. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +38 -38
  20. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +119 -119
  21. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +58 -58
  22. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +69 -0
  23. package/.docs/organized/changelogs/%40mastra%2Fpg.md +119 -119
  24. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +244 -244
  25. package/.docs/organized/changelogs/%40mastra%2Frag.md +73 -73
  26. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +24 -0
  27. package/.docs/organized/changelogs/%40mastra%2Fserver.md +211 -211
  28. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +76 -76
  29. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +44 -44
  30. package/.docs/organized/changelogs/create-mastra.md +126 -126
  31. package/.docs/organized/changelogs/mastra.md +256 -256
  32. package/.docs/organized/code-examples/agent.md +6 -0
  33. package/.docs/organized/code-examples/agui.md +3 -3
  34. package/.docs/organized/code-examples/ai-sdk-useChat.md +2 -2
  35. package/.docs/organized/code-examples/ai-sdk-v5.md +201 -0
  36. package/.docs/organized/code-examples/assistant-ui.md +2 -2
  37. package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +2 -2
  38. package/.docs/organized/code-examples/bird-checker-with-nextjs.md +2 -2
  39. package/.docs/organized/code-examples/client-side-tools.md +1 -1
  40. package/.docs/organized/code-examples/crypto-chatbot.md +5 -5
  41. package/.docs/organized/code-examples/openapi-spec-writer.md +2 -2
  42. package/.docs/organized/code-examples/workflow-with-suspend-resume.md +181 -0
  43. package/.docs/raw/agents/agent-memory.mdx +126 -0
  44. package/.docs/raw/agents/dynamic-agents.mdx +34 -2
  45. package/.docs/raw/agents/overview.mdx +21 -33
  46. package/.docs/raw/community/licensing.mdx +27 -19
  47. package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +60 -26
  48. package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
  49. package/.docs/raw/deployment/cloud-providers/index.mdx +44 -9
  50. package/.docs/raw/deployment/server-deployment.mdx +56 -0
  51. package/.docs/raw/deployment/serverless-platforms/cloudflare-deployer.mdx +9 -30
  52. package/.docs/raw/deployment/serverless-platforms/index.mdx +13 -13
  53. package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +291 -216
  54. package/.docs/raw/frameworks/agentic-uis/assistant-ui.mdx +0 -34
  55. package/.docs/raw/frameworks/agentic-uis/copilotkit.mdx +162 -181
  56. package/.docs/raw/frameworks/servers/express.mdx +1 -1
  57. package/.docs/raw/frameworks/web-frameworks/astro.mdx +2 -2
  58. package/.docs/raw/frameworks/web-frameworks/next-js.mdx +1 -1
  59. package/.docs/raw/frameworks/web-frameworks/sveltekit.mdx +4 -4
  60. package/.docs/raw/frameworks/web-frameworks/vite-react.mdx +1 -1
  61. package/.docs/raw/getting-started/installation.mdx +10 -7
  62. package/.docs/raw/getting-started/model-capability.mdx +1 -1
  63. package/.docs/raw/memory/overview.mdx +8 -0
  64. package/.docs/raw/memory/semantic-recall.mdx +6 -0
  65. package/.docs/raw/observability/tracing.mdx +30 -0
  66. package/.docs/raw/rag/retrieval.mdx +24 -5
  67. package/.docs/raw/reference/agents/agent.mdx +2 -2
  68. package/.docs/raw/reference/cli/create-mastra.mdx +7 -0
  69. package/.docs/raw/reference/cli/dev.mdx +4 -3
  70. package/.docs/raw/reference/client-js/agents.mdx +8 -0
  71. package/.docs/raw/reference/memory/query.mdx +35 -14
  72. package/.docs/raw/reference/observability/providers/keywordsai.mdx +73 -0
  73. package/.docs/raw/reference/rag/rerankWithScorer.mdx +213 -0
  74. package/.docs/raw/reference/storage/mssql.mdx +108 -0
  75. package/.docs/raw/server-db/custom-api-routes.mdx +67 -0
  76. package/.docs/raw/server-db/production-server.mdx +66 -0
  77. package/.docs/raw/tools-mcp/mcp-overview.mdx +28 -7
  78. package/.docs/raw/workflows/control-flow.mdx +91 -93
  79. package/.docs/raw/workflows/input-data-mapping.mdx +31 -43
  80. package/.docs/raw/workflows/overview.mdx +22 -12
  81. package/.docs/raw/workflows/pausing-execution.mdx +49 -4
  82. package/.docs/raw/workflows/suspend-and-resume.mdx +17 -16
  83. package/.docs/raw/workflows/using-with-agents-and-tools.mdx +16 -13
  84. package/.docs/raw/workflows-legacy/overview.mdx +11 -0
  85. package/LICENSE.md +11 -42
  86. package/package.json +7 -9
  87. package/.docs/raw/deployment/custom-api-routes.mdx +0 -55
  88. package/.docs/raw/deployment/server.mdx +0 -116
  89. package/.docs/raw/frameworks/ai-sdk-v5.mdx +0 -91
  90. /package/.docs/raw/{local-dev/mastra-dev.mdx → server-db/local-dev-playground.mdx} +0 -0
  91. /package/.docs/raw/{client-js/overview.mdx → server-db/mastra-client.mdx} +0 -0
  92. /package/.docs/raw/{deployment → server-db}/middleware.mdx +0 -0
  93. /package/.docs/raw/{storage/overview.mdx → server-db/storage.mdx} +0 -0
@@ -79,7 +79,7 @@ OPENAI_API_KEY=<your-api-key>
  ```
  > This example uses OpenAI. Each LLM provider uses a unique name. See [Model Capabilities](/docs/getting-started/model-capability) for more information.

- You can now launch the [Mastra Development Server](/docs/local-dev/mastra-dev) and test your agent using the Mastra Playground.
+ You can now launch the [Mastra Development Server](/docs/server-db/local-dev-playground) and test your agent using the Mastra Playground.

  </Steps>

@@ -114,7 +114,7 @@ This helps users manually set up a Mastra project with their preferred package m

  npm install typescript tsx @types/node mastra@latest --save-dev

- npm install @mastra/core@latest zod @ai-sdk/openai
+ npm install @mastra/core@latest zod@^3 @ai-sdk/openai
  ```

  </Tab>
@@ -124,7 +124,7 @@ This helps users manually set up a Mastra project with their preferred package m

  pnpm add typescript tsx @types/node mastra@latest --save-dev

- pnpm add @mastra/core@latest zod @ai-sdk/openai
+ pnpm add @mastra/core@latest zod@^3 @ai-sdk/openai
  ```

  </Tab>
@@ -134,7 +134,7 @@ This helps users manually set up a Mastra project with their preferred package m

  yarn add typescript tsx @types/node mastra@latest --dev

- yarn add @mastra/core@latest zod @ai-sdk/openai
+ yarn add @mastra/core@latest zod@^3 @ai-sdk/openai
  ```

  </Tab>
@@ -144,7 +144,7 @@ This helps users manually set up a Mastra project with their preferred package m

  bun add typescript tsx @types/node mastra@latest --dev

- bun add @mastra/core@latest zod @ai-sdk/openai
+ bun add @mastra/core@latest zod@^3 @ai-sdk/openai
  ```

  </Tab>
@@ -295,7 +295,7 @@ export const mastra = new Mastra({
  });
  ```

- You can now launch the [Mastra Development Server](/docs/local-dev/mastra-dev) and test your agent using the Mastra Playground.
+ You can now launch the [Mastra Development Server](/docs/server-db/local-dev-playground) and test your agent using the Mastra Playground.

  </Steps>

@@ -317,5 +317,8 @@ To install Mastra in an existing project, use the `mastra init` command.

  ## Next steps

- - [Local Development](/docs/local-dev/mastra-dev)
+ - [Local Development](/docs/server-db/local-dev-playground)
  - [Deploy to Mastra Cloud](/docs/deployment/overview)
+
+
+ /docs/server-db/local-dev-playground
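The `zod@^3` pin in the updated install commands is presumably because `@mastra/core` targets zod v3 schemas for tools and workflows. As a rough sketch of how the three runtime dependencies fit together once installed (the `@mastra/core/agent` and `@mastra/core/tools` subpath imports and the `echoTool`/`demoAgent` names are illustrative assumptions, not taken from the docs being diffed):

```ts
// Minimal sketch: wires together the three runtime dependencies installed above.
import { Agent } from "@mastra/core/agent";
import { createTool } from "@mastra/core/tools";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

// A trivial tool whose input is validated with a zod v3 schema.
const echoTool = createTool({
  id: "echo",
  description: "Echo the provided text back to the caller",
  inputSchema: z.object({ text: z.string() }),
  execute: async ({ context }) => ({ echoed: context.text }),
});

// An agent that uses the OpenAI provider from @ai-sdk/openai.
export const demoAgent = new Agent({
  name: "demo-agent",
  instructions: "You are a concise assistant.",
  model: openai("gpt-4o-mini"),
  tools: { echoTool },
});
```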
@@ -1,4 +1,4 @@
- ## Model Capabilities
+ # Model Capabilities

  import { ProviderTable } from "@/components/provider-table";

@@ -156,6 +156,14 @@ const agent = new Agent({
  - [Postgres](/examples/memory/memory-with-pg)
  - [Upstash](/examples/memory/memory-with-upstash)

+ ## Viewing Retrieved Messages
+
+ If tracing is enabled in your Mastra deployment and memory is configured either with `lastMessages` and/or `semanticRecall`, the agent’s trace output will show all messages retrieved for context—including both recent conversation history and messages recalled via semantic recall.
+
+ This is helpful for debugging, understanding agent decisions, and verifying that the agent is retrieving the right information for each request.
+
+ For more details on enabling and configuring tracing, see [Tracing](../observability/tracing).
+
  ## Next Steps

  Now that you understand the core concepts, continue to [semantic recall](./semantic-recall.mdx) to learn how to add RAG memory to your Mastra agents.
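For reference, the two memory options named in the added section look roughly like this when wired into an agent. A sketch only: the agent name and the `lastMessages`/`semanticRecall` values are illustrative, and semantic recall additionally needs a vector store and embedder on the `Memory` instance, which are omitted here.

```ts
// Sketch of a Memory configured with both options mentioned above, so that
// retrieved messages show up in the agent's trace output once tracing is
// enabled. Values are illustrative; semantic recall also requires a vector
// store and embedder on the Memory instance (omitted here).
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";
import { openai } from "@ai-sdk/openai";

const memory = new Memory({
  options: {
    lastMessages: 10,                             // recent conversation history
    semanticRecall: { topK: 3, messageRange: 2 }, // semantically recalled messages
  },
});

export const supportAgent = new Agent({
  name: "support-agent",
  instructions: "Use prior conversation context when it is relevant.",
  model: openai("gpt-4o-mini"),
  memory,
});
```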
@@ -149,3 +149,9 @@ You might want to disable semantic recall in scenarios like:
  - When conversation history provide sufficient context for the current conversation.
  - In performance-sensitive applications, like realtime two-way audio, where the added latency of creating embeddings and running vector queries is noticeable.
+
+ ## Viewing Recalled Messages
+
+ When tracing is enabled, any messages retrieved via semantic recall will appear in the agent’s trace output, alongside recent conversation history (if configured).
+
+ For more info on viewing message traces, see [Viewing Retrieved Messages](./overview.mdx#viewing-retrieved-messages).
@@ -105,6 +105,36 @@ Here's what a traced agent interaction looks like in [SigNoz](https://signoz.io)

  For a complete list of supported observability providers and their configuration details, see the [Observability Providers reference](../../reference/observability/providers/).

+ ### Custom Instrumentation files
+
+ You can define custom instrumentation files in your Mastra project by placing them in the `/mastra` folder. Mastra automatically detects and bundles these files instead of using the default instrumentation.
+
+ #### Supported File Types
+
+ Mastra looks for instrumentation files with these extensions:
+ - `instrumentation.js`
+ - `instrumentation.ts`
+ - `instrumentation.mjs`
+
+ #### Example
+
+ ```ts filename="/mastra/instrumentation.ts" showLineNumbers copy
+ import { NodeSDK } from '@opentelemetry/sdk-node';
+ import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node';
+ import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';
+
+ const sdk = new NodeSDK({
+   traceExporter: new OTLPTraceExporter({
+     url: 'http://localhost:4318/v1/traces',
+   }),
+   instrumentations: [getNodeAutoInstrumentations()],
+ });
+
+ sdk.start();
+ ```
+
+ When Mastra finds a custom instrumentation file, it automatically replaces the default instrumentation and bundles it during the build process.
+
  ### Next.js-specific Tracing steps

  If you're using Next.js, you have three additional configuration steps:
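The instrumentation file above covers the OpenTelemetry SDK side; for Mastra to emit traces at all, telemetry also has to be enabled on the `Mastra` instance. A minimal sketch, assuming the `telemetry` config shape that appears in the Keywords AI example later in this diff (the service name is a placeholder):

```ts
// Sketch: enable telemetry on the Mastra instance so the custom
// instrumentation file above has traces to work with. The serviceName
// is a placeholder.
import { Mastra } from "@mastra/core/mastra";

export const mastra = new Mastra({
  telemetry: {
    serviceName: "my-service",
    enabled: true,
  },
});
```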
@@ -456,7 +456,10 @@ Here's how to use re-ranking:

  ```ts showLineNumbers copy
  import { openai } from "@ai-sdk/openai";
- import { rerank } from "@mastra/rag";
+ import {
+   rerankWithScorer as rerank,
+   MastraAgentRelevanceScorer
+ } from "@mastra/rag";

  // Get initial results from vector search
  const initialResults = await pgVector.query({
@@ -465,19 +468,35 @@ const initialResults = await pgVector.query({
    topK: 10,
  });

+ // Create a relevance scorer
+ const relevanceProvider = new MastraAgentRelevanceScorer('relevance-scorer', openai("gpt-4o-mini"));
+
  // Re-rank the results
- const rerankedResults = await rerank(
-   initialResults,
+ const rerankedResults = await rerank({
+   results: initialResults,
    query,
-   openai("gpt-4o-mini"),
+   provider: relevanceProvider,
+   options: {
+     topK: 10,
+   },
  );
  ```

  > **Note:** For semantic scoring to work properly during re-ranking, each result must include the text content in its `metadata.text` field.

+ You can also use other relevance score providers like Cohere or ZeroEntropy:
+
+ ```ts showLineNumbers copy
+ const relevanceProvider = new CohereRelevanceScorer('rerank-v3.5');
+ ```
+
+ ```ts showLineNumbers copy
+ const relevanceProvider = new ZeroEntropyRelevanceScorer('zerank-1');
+ ```
+
  The re-ranked results combine vector similarity with semantic understanding to improve retrieval quality.

- For more details about re-ranking, see the [rerank()](/reference/rag/rerank) method.
+ For more details about re-ranking, see the [rerank()](/reference/rag/rerankWithScorer) method.

  For an example of how to use the re-ranking method, see the [Re-ranking Results](../../examples/rag/rerank/rerank.mdx) example.
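Putting the Cohere variant together with the call above gives roughly the following. A sketch: the `CohereRelevanceScorer` import from `@mastra/rag` matches the usage example in the new `rerankWithScorer()` reference page added in this diff, and the Cohere API key is assumed to come from the environment.

```ts
// Sketch combining the snippets above. Assumes CohereRelevanceScorer is
// exported from "@mastra/rag" (as in the rerankWithScorer() reference added
// in this diff) and that the Cohere API key is read from the environment.
import { rerankWithScorer as rerank, CohereRelevanceScorer } from "@mastra/rag";

const query = "deployment best practices"; // same query used for the vector search
const relevanceProvider = new CohereRelevanceScorer("rerank-v3.5");

const rerankedResults = await rerank({
  results: initialResults, // from the pgVector.query() call above
  query,
  provider: relevanceProvider, // the reference page names this parameter "scorer"
  options: { topK: 10 },
});
```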
@@ -88,10 +88,10 @@ constructor(config: AgentConfig<TAgentId, TTools, TMetrics>)
  },
  {
    name: "memory",
-   type: "MastraMemory",
+   type: "MastraMemory | ({ runtimeContext: RuntimeContext }) => MastraMemory | Promise<MastraMemory>",
    isOptional: true,
    description:
-     "Memory system for the agent to store and retrieve information.",
+     "Memory system for the agent to store and retrieve information. Can be a static memory instance or a function that returns a memory instance based on runtime context.",
  },
  {
    name: "voice",
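In practice, the new function form of `memory` looks roughly like the sketch below; the `"user-tier"` runtime-context key, its values, and the per-tier `lastMessages` limits are illustrative assumptions.

```ts
// Sketch of the function form of `memory`: choose a Memory instance from
// runtime context. The "user-tier" key and its values are hypothetical.
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";
import { openai } from "@ai-sdk/openai";

export const assistant = new Agent({
  name: "assistant",
  instructions: "Help the user with their account.",
  model: openai("gpt-4o-mini"),
  memory: ({ runtimeContext }) => {
    const tier = runtimeContext.get("user-tier");
    return new Memory({
      options: { lastMessages: tier === "pro" ? 50 : 10 },
    });
  },
});
```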
@@ -68,6 +68,13 @@ create-mastra [options]
    description: "Do not include example code",
    isOptional: true,
  },
+ {
+   name: "--template",
+   type: "string",
+   description:
+     "Create project from a template (use template name, public GitHub URL, or leave blank to select from list)",
+   isOptional: true,
+ },
  {
    name: "--timeout",
    type: "number",
@@ -44,13 +44,13 @@ mastra dev [options]
  },
  {
    name: "--env",
-   type: "string", 
+   type: "string",
    description: "Path to custom environment file",
    isOptional: true,
  },
  {
    name: "--inspect",
-   type: "boolean", 
+   type: "boolean",
    description: "Start the dev server in inspect mode for debugging (cannot be used with --inspect-brk)",
    isOptional: true,
  },
@@ -123,6 +123,7 @@ Workflows are expected to be exported from `src/mastra/workflows` (or the config
  - **GET `/api/memory/threads`**: Get all threads.
  - **GET `/api/memory/threads/:threadId`**: Get thread by ID.
  - **GET `/api/memory/threads/:threadId/messages`**: Get messages for a thread.
+ - **GET `/api/memory/threads/:threadId/messages/paginated`**: Get paginated messages for a thread.
  - **POST `/api/memory/threads`**: Create a new thread.
  - **PATCH `/api/memory/threads/:threadId`**: Update a thread.
  - **DELETE `/api/memory/threads/:threadId`**: Delete a thread.
@@ -154,7 +155,7 @@ Workflows are expected to be exported from `src/mastra/workflows` (or the config

  ## Additional Notes

- The port defaults to 4111. Both the port and hostname can be configured via the mastra server config. See [Launch Development Server](/docs/local-dev/mastra-dev#launch-development-server) for configuration details.
+ The port defaults to 4111. Both the port and hostname can be configured via the mastra server config. See [Launch Development Server](/docs/server-db/local-dev-playground) for configuration details.

  Make sure you have your environment variables set up in your `.env.development` or `.env` file for any providers you use (e.g., `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, etc.).
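For the new paginated-messages route listed above, a call against the local dev server might look like the sketch below; the `agentId` query parameter and the shape of the JSON response are assumptions, since the diff only lists the route itself.

```ts
// Hypothetical request to the new paginated route on a local dev server
// (default port 4111, per the note above). The agentId query parameter and
// the response shape are assumptions; only the route is listed in the diff.
const res = await fetch(
  "http://localhost:4111/api/memory/threads/thread-123/messages/paginated?agentId=weatherAgent",
);
const page = await res.json();
console.log(page);
```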
@@ -89,6 +89,14 @@ response.processDataStream({
    },
  });

+ // Process text stream with the processTextStream util
+ // (used with structured output)
+ response.processTextStream({
+   onTextPart: text => {
+     process.stdout.write(text);
+   },
+ });
+
  // You can also read from response body directly
  const reader = response.body.getReader();
  while (true) {
@@ -1,6 +1,6 @@
  # query

- Retrieves messages from a specific thread, with support for pagination and filtering options.
+ Retrieves messages from a specific thread, with support for pagination, filtering options, and semantic search.

  ## Usage Example

@@ -12,7 +12,7 @@ const memory = new Memory({
  });

  // Get last 50 messages
- const { messages, uiMessages } = await memory.query({
+ const { messages, uiMessages, messagesV2 } = await memory.query({
    threadId: "thread-123",
    selectBy: {
      last: 50,
@@ -43,7 +43,7 @@ const { messages } = await memory.query({
      vectorSearchString: "What was discussed about deployment?",
    },
    threadConfig: {
-     historySearch: true,
+     semanticRecall: true,
    },
  });
  ```
@@ -69,13 +69,13 @@ const { messages } = await memory.query({
  {
    name: "selectBy",
    type: "object",
-   description: "Options for filtering messages",
+   description: "Options for filtering and selecting messages",
    isOptional: true,
  },
  {
    name: "threadConfig",
    type: "MemoryConfig",
-   description: "Configuration options for message retrieval",
+   description: "Configuration options for message retrieval and semantic search",
    isOptional: true,
  },
  ]}
@@ -88,7 +88,7 @@ const { messages } = await memory.query({
  {
    name: "vectorSearchString",
    type: "string",
-   description: "Search string for finding semantically similar messages",
+   description: "Search string for finding semantically similar messages. Requires semantic recall to be enabled in threadConfig.",
    isOptional: true,
  },
  {
@@ -102,9 +102,9 @@ const { messages } = await memory.query({
  {
    name: "include",
    type: "array",
-   description: "Array of message IDs to include with context",
+   description: "Array of specific message IDs to include with optional context messages",
    isOptional: true,
- },
+ }
  ]}
  />

@@ -118,6 +118,12 @@ const { messages } = await memory.query({
    description: "ID of the message to include",
    isOptional: false,
  },
+ {
+   name: "threadId",
+   type: "string",
+   description: "Optional thread ID (defaults to the main threadId parameter)",
+   isOptional: true,
+ },
  {
    name: "withPreviousMessages",
    type: "number",
@@ -142,22 +148,37 @@ const { messages } = await memory.query({
  {
    name: "messages",
    type: "CoreMessage[]",
-   description: "Array of retrieved messages in their core format",
+   description: "Array of retrieved messages in their core format (v1 format for backwards compatibility)",
  },
  {
    name: "uiMessages",
-   type: "AiMessage[]",
-   description: "Array of messages formatted for UI display",
+   type: "UIMessage[]",
+   description: "Array of messages formatted for UI display, including proper threading of tool calls and results",
+ },
+ {
+   name: "messagesV2",
+   type: "MastraMessageV2[]",
+   description: "Array of messages in the v2 format, the current internal message format",
  },
  ]}
  />

  ## Additional Notes

- The `query` function returns two different message formats:
+ The `query` function returns three different message formats:
+
+ - `messages`: Core message format (v1) used for backwards compatibility with older APIs
+ - `uiMessages`: Formatted messages suitable for UI display, including proper threading of tool calls and results
+ - `messagesV2`: The current internal message format with enhanced structure and metadata
+
+ ### Semantic Search
+
+ When using `vectorSearchString`, ensure that:
+ - Semantic recall is enabled in your `threadConfig`
+ - A vector database is configured in your Memory instance
+ - An embedding model is provided

- - `messages`: Core message format used internally
- - `uiMessages`: Formatted messages suitable for UI display, including proper threading of tool calls and results
+ The function will automatically include context messages around semantically similar results based on the configured `messageRange`.

  ### Related
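The three semantic-search requirements listed in the added "Semantic Search" note above translate into a `Memory` configured with a vector store and an embedder in addition to `semanticRecall`. A sketch, where the LibSQL storage/vector and fastembed embedder (and their constructor option names) are illustrative assumptions rather than requirements:

```ts
// Sketch of the semantic-search prerequisites listed above: semantic recall
// enabled, a vector database configured, and an embedder provided. The
// LibSQL and fastembed choices (and their option names) are assumptions.
import { Memory } from "@mastra/memory";
import { LibSQLStore, LibSQLVector } from "@mastra/libsql";
import { fastembed } from "@mastra/fastembed";

const memory = new Memory({
  storage: new LibSQLStore({ url: "file:./memory.db" }),
  vector: new LibSQLVector({ connectionUrl: "file:./memory.db" }),
  embedder: fastembed,
  options: { semanticRecall: { topK: 3, messageRange: 2 } },
});

const { messages, uiMessages, messagesV2 } = await memory.query({
  threadId: "thread-123",
  selectBy: { vectorSearchString: "What was discussed about deployment?" },
  threadConfig: { semanticRecall: true },
});
```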
@@ -0,0 +1,73 @@
+ ---
+ title: "Reference: Keywords AI Integration | Mastra Observability Docs"
+ description: Documentation for integrating Keywords AI (an observability platform for LLM applications) with Mastra.
+ ---
+
+ ## Keywords AI
+
+ [Keywords AI](https://docs.keywordsai.co/get-started/overview) is a full-stack LLM engineering platform that helps developers and PMs build reliable AI products faster. In a shared workspace, product teams can build, monitor, and improve AI performance.
+
+ This tutorial shows how to set up Keywords AI tracing with [Mastra](https://mastra.ai/) to monitor and trace your AI-powered applications.
+
+ To help you get started quickly, we’ve provided a pre-built example. You can find the code [on GitHub](https://github.com/Keywords-AI/keywordsai-example-projects/tree/main/mastra-ai-weather-agent).
+
+
+ ## Setup
+
+ Here's the tutorial about the Mastra Weather Agent example.
+
+ ### 1. Install Dependencies
+
+ ```bash copy
+ pnpm install
+ ```
+
+ ### 2. Environment Variables
+
+ Copy the example environment file and add your API keys:
+
+ ```bash copy
+ cp .env.local.example .env.local
+ ```
+
+ Update .env.local with your credentials:
+
+ ```bash .env.local copy
+ OPENAI_API_KEY=your-openai-api-key
+ KEYWORDSAI_API_KEY=your-keywordsai-api-key
+ KEYWORDSAI_BASE_URL=https://api.keywordsai.co
+ ```
+
+ ### 3. Setup Mastra client with Keywords AI tracing
+
+ Configure with KeywordsAI telemetry in `src/mastra/index.ts`:
+
+ ```typescript filename="src/mastra/index.ts" showLineNumbers copy
+
+ import { Mastra } from "@mastra/core/mastra";
+ import { KeywordsAIExporter } from "@keywordsai/exporter-vercel";
+
+ telemetry: {
+   serviceName: "keywordai-mastra-example",
+   enabled: true,
+   export: {
+     type: "custom",
+     exporter: new KeywordsAIExporter({
+       apiKey: process.env.KEYWORDSAI_API_KEY,
+       baseUrl: process.env.KEYWORDSAI_BASE_URL,
+       debug: true,
+     })
+   }
+ }
+ ```
+
+ ### 3. Run the Project
+
+ ```bash copy
+ mastra dev
+ ```
+ This opens the Mastra playground where you can interact with the weather agent.
+
+ ## Observability
+
+ Once configured, you can view your traces and analytics in the [Keywords AI platform](https://platform.keywordsai.co/platform/traces).
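Note that the configuration snippet in the new page above shows only the `telemetry` block; in a working `src/mastra/index.ts` it sits inside the `Mastra` constructor. A completed sketch (the `weatherAgent` import path is an assumption based on the weather-agent example the page references):

```ts
// Completed sketch: the telemetry block from the example above, placed
// inside the Mastra constructor. The weatherAgent import path is assumed.
import { Mastra } from "@mastra/core/mastra";
import { KeywordsAIExporter } from "@keywordsai/exporter-vercel";
import { weatherAgent } from "./agents/weather-agent";

export const mastra = new Mastra({
  agents: { weatherAgent },
  telemetry: {
    serviceName: "keywordai-mastra-example",
    enabled: true,
    export: {
      type: "custom",
      exporter: new KeywordsAIExporter({
        apiKey: process.env.KEYWORDSAI_API_KEY,
        baseUrl: process.env.KEYWORDSAI_BASE_URL,
        debug: true,
      }),
    },
  },
});
```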
@@ -0,0 +1,213 @@
+ ---
+ title: "Reference: Rerank | Document Retrieval | RAG | Mastra Docs"
+ description: Documentation for the rerank function in Mastra, which provides advanced reranking capabilities for vector search results.
+ ---
+
+ # rerankWithScorer()
+
+ The `rerankWithScorer()` function provides advanced reranking capabilities for vector search results by combining semantic relevance, vector similarity, and position-based scoring.
+
+ ```typescript
+ function rerankWithScorer({
+   results: QueryResult[],
+   query: string,
+   scorer: RelevanceScoreProvider,
+   options?: RerankerFunctionOptions,
+ }): Promise<RerankResult[]>;
+ ```
+
+ ## Usage Example
+
+ ```typescript
+ import { openai } from "@ai-sdk/openai";
+ import { rerankWithScorer as rerank, CohereRelevanceScorer } from "@mastra/rag";
+
+ const scorer = new CohereRelevanceScorer('rerank-v3.5');
+
+ const rerankedResults = await rerank({
+   results: vectorSearchResults,
+   query: "How do I deploy to production?",
+   scorer,
+   options: {
+     weights: {
+       semantic: 0.5,
+       vector: 0.3,
+       position: 0.2,
+     },
+     topK: 3,
+   },
+ });
+ ```
+
+ ## Parameters
+
+ <PropertiesTable
+   content={[
+     {
+       name: "results",
+       type: "QueryResult[]",
+       description: "The vector search results to rerank",
+       isOptional: false,
+     },
+     {
+       name: "query",
+       type: "string",
+       description: "The search query text used to evaluate relevance",
+       isOptional: false,
+     },
+     {
+       name: "scorer",
+       type: "RelevanceScoreProvider",
+       description: "The relevance scorer to use for reranking",
+       isOptional: false,
+     },
+     {
+       name: "options",
+       type: "RerankerFunctionOptions",
+       description: "Options for the reranking model",
+       isOptional: true,
+     },
+   ]}
+ />
+
+ The `rerankWithScorer` function accepts any `RelevanceScoreProvider` from @mastra/rag.
+
+ > **Note:** For semantic scoring to work properly during re-ranking, each result must include the text content in its `metadata.text` field.
+
+ ### RerankerFunctionOptions
+
+ <PropertiesTable
+   content={[
+     {
+       name: "weights",
+       type: "WeightConfig",
+       description:
+         "Weights for different scoring components (must add up to 1)",
+       isOptional: true,
+       properties: [
+         {
+           type: "number",
+           parameters: [
+             {
+               name: "semantic",
+               description: "Weight for semantic relevance",
+               isOptional: true,
+               type: "number (default: 0.4)",
+             },
+           ],
+         },
+         {
+           type: "number",
+           parameters: [
+             {
+               name: "vector",
+               description: "Weight for vector similarity",
+               isOptional: true,
+               type: "number (default: 0.4)",
+             },
+           ],
+         },
+         {
+           type: "number",
+           parameters: [
+             {
+               name: "position",
+               description: "Weight for position-based scoring",
+               isOptional: true,
+               type: "number (default: 0.2)",
+             },
+           ],
+         },
+       ],
+     },
+     {
+       name: "queryEmbedding",
+       type: "number[]",
+       description: "Embedding of the query",
+       isOptional: true,
+     },
+     {
+       name: "topK",
+       type: "number",
+       description: "Number of top results to return",
+       isOptional: true,
+       defaultValue: "3",
+     },
+   ]}
+ />
+
+ ## Returns
+
+ The function returns an array of `RerankResult` objects:
+
+ <PropertiesTable
+   content={[
+     {
+       name: "result",
+       type: "QueryResult",
+       description: "The original query result",
+     },
+     {
+       name: "score",
+       type: "number",
+       description: "Combined reranking score (0-1)",
+     },
+     {
+       name: "details",
+       type: "ScoringDetails",
+       description: "Detailed scoring information",
+     },
+   ]}
+ />
+
+ ### ScoringDetails
+
+ <PropertiesTable
+   content={[
+     {
+       name: "semantic",
+       type: "number",
+       description: "Semantic relevance score (0-1)",
+     },
+     {
+       name: "vector",
+       type: "number",
+       description: "Vector similarity score (0-1)",
+     },
+     {
+       name: "position",
+       type: "number",
+       description: "Position-based score (0-1)",
+     },
+     {
+       name: "queryAnalysis",
+       type: "object",
+       description: "Query analysis details",
+       isOptional: true,
+       properties: [
+         {
+           type: "number",
+           parameters: [
+             {
+               name: "magnitude",
+               description: "Magnitude of the query",
+             },
+           ],
+         },
+         {
+           type: "number[]",
+           parameters: [
+             {
+               name: "dominantFeatures",
+               description: "Dominant features of the query",
+             },
+           ],
+         },
+       ],
+     },
+   ]}
+ />
+
+ ## Related
+
+ - [createVectorQueryTool](../tools/vector-query-tool)