@mastra/mcp-docs-server 0.13.27 → 0.13.28-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/.docs/organized/changelogs/%40internal%2Fplayground.md +3 -1
  2. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +10 -10
  3. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +4 -0
  4. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +21 -21
  5. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +20 -0
  6. package/.docs/organized/changelogs/%40mastra%2Fastra.md +10 -10
  7. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +11 -11
  8. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +11 -11
  9. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +41 -41
  10. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +10 -10
  11. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +10 -10
  12. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +10 -10
  13. package/.docs/organized/changelogs/%40mastra%2Fcore.md +97 -97
  14. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +10 -10
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +44 -44
  16. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +31 -31
  17. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +29 -29
  18. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +31 -31
  19. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +75 -75
  20. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +27 -27
  21. package/.docs/organized/changelogs/%40mastra%2Fevals.md +10 -10
  22. package/.docs/organized/changelogs/%40mastra%2Flance.md +10 -10
  23. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +10 -10
  24. package/.docs/organized/changelogs/%40mastra%2Floggers.md +10 -10
  25. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +33 -33
  26. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +10 -10
  27. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +21 -21
  28. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +19 -19
  29. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +10 -10
  30. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +10 -10
  31. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
  32. package/.docs/organized/changelogs/%40mastra%2Fpg.md +19 -19
  33. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +10 -10
  34. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +105 -105
  35. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +11 -11
  36. package/.docs/organized/changelogs/%40mastra%2Frag.md +13 -13
  37. package/.docs/organized/changelogs/%40mastra%2Freact.md +40 -0
  38. package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +21 -0
  39. package/.docs/organized/changelogs/%40mastra%2Fserver.md +41 -41
  40. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +10 -10
  41. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +10 -10
  42. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
  43. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +10 -10
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +11 -11
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +11 -11
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +11 -11
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +10 -10
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +9 -0
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +13 -13
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +10 -10
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +10 -10
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +10 -10
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +11 -11
  54. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +11 -11
  55. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +10 -10
  56. package/.docs/organized/changelogs/create-mastra.md +79 -79
  57. package/.docs/organized/changelogs/mastra.md +118 -118
  58. package/.docs/organized/code-examples/agent.md +2 -1
  59. package/.docs/organized/code-examples/heads-up-game.md +5 -5
  60. package/.docs/raw/agents/guardrails.mdx +335 -0
  61. package/.docs/raw/{networks-vnext/complex-task-execution.mdx → agents/networks.mdx} +29 -9
  62. package/.docs/raw/agents/overview.mdx +107 -63
  63. package/.docs/raw/agents/runtime-context.mdx +11 -16
  64. package/.docs/raw/agents/using-tools-and-mcp.mdx +1 -1
  65. package/.docs/raw/frameworks/agentic-uis/assistant-ui.mdx +9 -2
  66. package/.docs/raw/getting-started/mcp-docs-server.mdx +84 -179
  67. package/.docs/raw/getting-started/model-providers.mdx +5 -3
  68. package/.docs/raw/reference/agents/network.mdx +1 -1
  69. package/.docs/raw/reference/cli/create-mastra.mdx +61 -5
  70. package/.docs/raw/reference/cli/mastra.mdx +252 -0
  71. package/.docs/raw/reference/client-js/agents.mdx +1 -10
  72. package/.docs/raw/reference/processors/batch-parts-processor.mdx +111 -0
  73. package/.docs/raw/reference/processors/language-detector.mdx +154 -0
  74. package/.docs/raw/reference/processors/moderation-processor.mdx +145 -0
  75. package/.docs/raw/reference/processors/pii-detector.mdx +153 -0
  76. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +130 -0
  77. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +145 -0
  78. package/.docs/raw/reference/processors/token-limiter-processor.mdx +136 -0
  79. package/.docs/raw/reference/processors/unicode-normalizer.mdx +114 -0
  80. package/.docs/raw/reference/streaming/ChunkType.mdx +2 -6
  81. package/.docs/raw/reference/streaming/agents/MastraModelOutput.mdx +1 -5
  82. package/.docs/raw/reference/streaming/workflows/resumeStreamVNext.mdx +1 -1
  83. package/.docs/raw/reference/streaming/workflows/stream.mdx +1 -1
  84. package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +1 -1
  85. package/.docs/raw/reference/workflows/run-methods/resume.mdx +17 -1
  86. package/.docs/raw/reference/workflows/run-methods/start.mdx +17 -1
  87. package/.docs/raw/reference/workflows/step.mdx +11 -0
  88. package/.docs/raw/reference/workflows/workflow.mdx +7 -1
  89. package/.docs/raw/server-db/local-dev-playground.mdx +1 -1
  90. package/.docs/raw/workflows/overview.mdx +22 -5
  91. package/CHANGELOG.md +24 -0
  92. package/package.json +5 -5
  93. package/.docs/raw/agents/input-processors.mdx +0 -284
  94. package/.docs/raw/agents/output-processors.mdx +0 -328
  95. package/.docs/raw/networks-vnext/overview.mdx +0 -85
  96. package/.docs/raw/networks-vnext/single-task-execution.mdx +0 -135
  97. package/.docs/raw/reference/cli/build.mdx +0 -115
  98. package/.docs/raw/reference/cli/dev.mdx +0 -249
  99. package/.docs/raw/reference/cli/init.mdx +0 -97
  100. package/.docs/raw/reference/cli/lint.mdx +0 -56
  101. package/.docs/raw/reference/cli/mcp-docs-server.mdx +0 -82
  102. package/.docs/raw/reference/cli/scorers.mdx +0 -160
  103. package/.docs/raw/reference/cli/start.mdx +0 -50
@@ -0,0 +1,252 @@
1
+ ---
2
+ title: "Reference: CLI Commands"
3
+ description: Documentation for the Mastra CLI to develop, build, and start your project.
4
+ ---
5
+
6
+ import { Callout } from "nextra/components";
7
+
8
+ # CLI Commands
9
+
10
+ You can use the Command-Line Interface (CLI) provided by Mastra to develop, build, and start your Mastra project.
11
+
12
+ ## `mastra dev`
13
+
14
+ Starts a server which exposes a [local dev playground](/docs/server-db/local-dev-playground) and REST endpoints for your agents, tools, and workflows. You can visit [http://localhost:4111/swagger-ui](http://localhost:4111/swagger-ui) for an overview of all available endpoints once `mastra dev` is running.
15
+
16
+ You can also [configure the server](/docs/server-db/local-dev-playground#configuration).
17
+
18
+ ### Flags
19
+
20
+ The command accepts [common flags][common-flags] and the following additional flags:
21
+
22
+ #### `--https`
23
+
24
+ Enable local HTTPS support. [Learn more](/docs/server-db/local-dev-playground#local-https).
25
+
26
+ #### `--inspect`
27
+
28
+ Start the development server in inspect mode, which is helpful for debugging. This can't be used together with `--inspect-brk`.
29
+
30
+ #### `--inspect-brk`
31
+
32
+ Start the development server in inspect mode and break at the beginning of the script. This can't be used together with `--inspect`.
33
+
34
+ #### `--custom-args`
35
+
36
+ Comma-separated list of custom arguments to pass to the development server. You can pass arguments to the Node.js process, e.g. `--experimental-transform-types`.
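As an illustration, a hypothetical invocation that forwards Node.js flags to the development server process (the exact `--custom-args` value syntax is an assumption based on the comma-separated description above):

```bash copy
mastra dev --custom-args=--experimental-transform-types,--trace-warnings
```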
37
+
38
+ ### Configs
39
+
40
+ You can set certain environment variables to modify the behavior of `mastra dev`.
41
+
42
+ #### Disable build caching
43
+
44
+ Set `MASTRA_DEV_NO_CACHE=1` to force a full rebuild rather than using the cached assets under `.mastra/`:
45
+
46
+ ```bash copy
47
+ MASTRA_DEV_NO_CACHE=1 mastra dev
48
+ ```
49
+
50
+ This helps when you are debugging bundler plugins or suspect stale output.
51
+
52
+ #### Limit parallelism
53
+
54
+ `MASTRA_CONCURRENCY` caps how many expensive operations run in parallel (primarily build and evaluation steps). For example:
55
+
56
+ ```bash copy
57
+ MASTRA_CONCURRENCY=4 mastra dev
58
+ ```
59
+
60
+ Leave it unset to let the CLI pick a sensible default for the machine.
61
+
62
+ #### Custom provider endpoints
63
+
64
+ When using providers supported by the Vercel AI SDK, you can redirect requests through proxies or internal gateways by setting a base URL. For OpenAI:
65
+
66
+ ```bash copy
67
+ OPENAI_API_KEY=<your-api-key> \
68
+ OPENAI_BASE_URL=https://openrouter.example/v1 \
69
+ mastra dev
70
+ ```
71
+
72
+ For Anthropic:
73
+
74
+ ```bash copy
75
+ ANTHROPIC_API_KEY=<your-api-key> \
76
+ ANTHROPIC_BASE_URL=https://anthropic.internal \
77
+ mastra dev
78
+ ```
79
+
80
+ These are forwarded by the AI SDK and work with any `openai()` or `anthropic()` calls.
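To illustrate, no agent code changes are needed when these variables are set; a minimal sketch using the same `openai()` helper shown elsewhere in these docs:

```typescript copy
import { openai } from "@ai-sdk/openai";
import { Agent } from "@mastra/core/agent";

// With OPENAI_BASE_URL set in the environment, the AI SDK sends this
// model's requests through the configured proxy or gateway.
export const agent = new Agent({
  name: "proxied-agent",
  instructions: "You are a helpful assistant",
  model: openai("gpt-4o-mini"),
});
```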
81
+
82
+ ## `mastra build`
83
+
84
+ The `mastra build` command bundles your Mastra project into a production-ready Hono server. [Hono](https://hono.dev/) is a lightweight, type-safe web framework that makes it easy to deploy Mastra agents as HTTP endpoints with middleware support.
85
+
86
+ Under the hood, Mastra's Rollup-based bundler locates your Mastra entry file and bundles it into a production-ready Hono server. During bundling it tree-shakes your code and generates source maps for debugging.
87
+
88
+ The output in `.mastra` can be deployed to any cloud server using [`mastra start`](#mastra-start).
89
+
90
+ If you're deploying to a [serverless platform](/docs/deployment/serverless-platforms), you need to install the matching deployer so that the correct output is produced in `.mastra`.
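For example, targeting Vercel might start with installing the corresponding deployer package before building (a sketch only; `@mastra/deployer-vercel` appears in this release's package list, and other platforms have their own deployer packages):

```bash copy
npm install @mastra/deployer-vercel
mastra build
```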
91
+
92
+ It accepts [common flags][common-flags].
93
+
94
+ ### Configs
95
+
96
+ You can set certain environment variables to modify the behavior of `mastra build`.
97
+
98
+ #### Limit parallelism
99
+
100
+ For CI, or when running in resource-constrained environments, you can cap how many expensive tasks run at once by setting `MASTRA_CONCURRENCY`:
101
+
102
+ ```bash copy
103
+ MASTRA_CONCURRENCY=2 mastra build
104
+ ```
105
+
106
+ ## `mastra start`
107
+
108
+ <Callout type="info">
109
+ You need to run `mastra build` before using `mastra start`.
110
+ </Callout>
111
+
112
+ Starts a local server to serve your built Mastra application in production mode. By default, [OTEL Tracing](/docs/observability/otel-tracing) is enabled.
113
+
114
+ ### Flags
115
+
116
+ The command accepts [common flags][common-flags] and the following additional flags:
117
+
118
+ #### `--dir`
119
+
120
+ The path to your built Mastra output directory. Defaults to `.mastra/output`.
121
+
122
+ #### `--no-telemetry`
123
+
124
+ Disable [OTEL Tracing](/docs/observability/otel-tracing).
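A typical sequence, shown as an illustrative sketch using the defaults and flags described above:

```bash copy
mastra build
mastra start --dir .mastra/output --no-telemetry
```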
125
+
126
+ ## `mastra lint`
127
+
128
+ The `mastra lint` command validates the structure and code of your Mastra project to ensure it follows best practices and is error-free.
129
+
130
+ It accepts [common flags][common-flags].
131
+
132
+ ## `mastra scorers`
133
+
134
+ The `mastra scorers` command provides management capabilities for evaluation scorers that measure the quality, accuracy, and performance of AI-generated outputs.
135
+
136
+ Read the [Scorers overview](/docs/scorers/overview) to learn more.
137
+
138
+ ### `add`
139
+
140
+ Add a new scorer to your project. You can use an interactive prompt:
141
+
142
+ ```bash copy
143
+ mastra scorers add
144
+ ```
145
+
146
+ Or provide a scorer name directly:
147
+
148
+ ```bash copy
149
+ mastra scorers add answer-relevancy
150
+ ```
151
+
152
+ Use the [`list`](#list) command to get the correct ID.
153
+
154
+ ### `list`
155
+
156
+ List all available scorer templates. Use the ID for the `add` command.
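For example, to see the available template IDs before adding one:

```bash copy
mastra scorers list
```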
157
+
158
+ ## `mastra init`
159
+
160
+ The `mastra init` command initializes Mastra in an existing project. Use this command to scaffold the necessary folders and configuration without generating a new project from scratch.
161
+
162
+ ### Flags
163
+
164
+ The command accepts the following additional flags:
165
+
166
+ #### `--default`
167
+
168
+ Creates files inside `src` using OpenAI as the model provider. It also populates the `src/mastra` folders with example code.
169
+
170
+ #### `--dir`
171
+
172
+ The directory where Mastra files should be saved. Defaults to `src`.
173
+
174
+ #### `--components`
175
+
176
+ Comma-separated list of components to add. A new folder will be created for each component. Defaults to `['agents', 'tools', 'workflows']`.
177
+
178
+ #### `--llm`
179
+
180
+ Default model provider. Choose from: `"openai" | "anthropic" | "groq" | "google" | "cerebras" | "mistral"`.
181
+
182
+ #### `--llm-api-key`
183
+
184
+ The API key for your chosen model provider. It will be written to an environment variables file (`.env`).
185
+
186
+ #### `--example`
187
+
188
+ If enabled, example code is written for each of the configured components (e.g. example agent code).
189
+
190
+ #### `--no-example`
191
+
192
+ Do not include example code. Useful when using the `--default` flag.
193
+
194
+ #### `--mcp`
195
+
196
+ Configure your code editor with Mastra's MCP server. Choose from: `"cursor" | "cursor-global" | "windsurf" | "vscode"`.
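As a sketch, a non-interactive setup combining several of these flags might look like the following (flag values are illustrative):

```bash copy
mastra init \
  --dir src \
  --components agents,tools,workflows \
  --llm openai \
  --llm-api-key <your-api-key> \
  --example
```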
197
+
198
+ ## Common flags
199
+
200
+ ### `--dir`
201
+
202
+ **Available in:** `dev`, `build`, `lint`
203
+
204
+ The path to your Mastra folder. Defaults to `src/mastra`.
205
+
206
+ ### `--env`
207
+
208
+ **Available in:** `dev`, `build`, `start`
209
+
210
+ Custom environment variables file to include. By default, includes `.env.development`, `.env.local`, and `.env`.
211
+
212
+ ### `--root`
213
+
214
+ **Available in:** `dev`, `build`, `lint`
215
+
216
+ Path to your root folder. Defaults to `process.cwd()`.
217
+
218
+ ### `--tools`
219
+
220
+ **Available in:** `dev`, `build`, `lint`
221
+
222
+ Comma-separated list of tool paths to include. Defaults to `src/mastra/tools`.
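For instance, the common flags can be combined on any of the commands listed above (the paths and env file shown are illustrative):

```bash copy
mastra build --root ./my-project --dir src/mastra --env .env.staging
```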
223
+
224
+ ## Global flags
225
+
226
+ Use these flags to get information about the `mastra` CLI.
227
+
228
+ ### `--version`
229
+
230
+ Prints the Mastra CLI version and exits.
231
+
232
+ ### `--help`
233
+
234
+ Prints the help message and exits.
235
+
236
+ ## Telemetry
237
+
238
+ By default, Mastra collects anonymous information about your project, such as your OS, Mastra version, and Node.js version. You can read the [source code](https://github.com/mastra-ai/mastra/blob/main/packages/cli/src/analytics/index.ts) to check what's collected.
239
+
240
+ You can opt out of the CLI analytics by setting an environment variable:
241
+
242
+ ```bash copy
243
+ MASTRA_TELEMETRY_DISABLED=1
244
+ ```
245
+
246
+ You can also set this while using other `mastra` commands:
247
+
248
+ ```bash copy
249
+ MASTRA_TELEMETRY_DISABLED=1 mastra dev
250
+ ```
251
+
252
+ [common-flags]: #common-flags
@@ -163,7 +163,7 @@ const liveEvals = await agent.liveEvals();
163
163
  ### Stream
164
164
 
165
165
 
166
- Stream responses using the enhanced API with improved method signatures. This method provides enhanced capabilities and format flexibility, with support for both Mastra's native format and AI SDK v5 compatibility:
166
+ Stream responses using the enhanced API with improved method signatures. This method provides additional capabilities and format flexibility, with support for Mastra's native format.
167
167
 
168
168
  ```typescript
169
169
  const response = await agent.stream(
@@ -174,15 +174,6 @@ const response = await agent.stream(
174
174
  }
175
175
  );
176
176
 
177
- // AI SDK v5 compatible format
178
- const response = await agent.stream(
179
- "Tell me a story",
180
- {
181
- format: 'aisdk', // Enable AI SDK v5 compatibility
182
- threadId: "thread-1",
183
- }
184
- );
185
-
186
177
  // Process the stream
187
178
  response.processDataStream({
188
179
  onChunk: (chunk) => {
@@ -0,0 +1,111 @@
1
+ ---
2
+ title: "Reference: Batch Parts Processor | Processors | Mastra Docs"
3
+ description: "Documentation for the BatchPartsProcessor in Mastra, which batches multiple stream parts together to reduce frequency of emissions."
4
+ ---
5
+
6
+ # BatchPartsProcessor
7
+
8
+ The `BatchPartsProcessor` is an **output processor** that batches multiple stream parts together to reduce the frequency of emissions during streaming. This processor is useful for reducing network overhead, improving user experience by consolidating small text chunks, and optimizing streaming performance by controlling when parts are emitted to the client.
9
+
10
+ ## Usage example
11
+
12
+ ```typescript copy
13
+ import { BatchPartsProcessor } from "@mastra/core/processors";
14
+
15
+ const processor = new BatchPartsProcessor({
16
+ batchSize: 5,
17
+ maxWaitTime: 100,
18
+ emitOnNonText: true
19
+ });
20
+ ```
21
+
22
+ ## Constructor parameters
23
+
24
+ <PropertiesTable
25
+ content={[
26
+ {
27
+ name: "options",
28
+ type: "Options",
29
+ description: "Configuration options for batching stream parts",
30
+ isOptional: true,
31
+ },
32
+ ]}
33
+ />
34
+
35
+ ### Options
36
+
37
+ <PropertiesTable
38
+ content={[
39
+ {
40
+ name: "batchSize",
41
+ type: "number",
42
+ description: "Number of parts to batch together before emitting",
43
+ isOptional: true,
44
+ default: "5",
45
+ },
46
+ {
47
+ name: "maxWaitTime",
48
+ type: "number",
49
+ description: "Maximum time to wait before emitting a batch (in milliseconds). If set, will emit the current batch even if it hasn't reached batchSize",
50
+ isOptional: true,
51
+ default: "undefined (no timeout)",
52
+ },
53
+ {
54
+ name: "emitOnNonText",
55
+ type: "boolean",
56
+ description: "Whether to emit immediately when a non-text part is encountered",
57
+ isOptional: true,
58
+ default: "true",
59
+ },
60
+ ]}
61
+ />
62
+
63
+ ## Returns
64
+
65
+ <PropertiesTable
66
+ content={[
67
+ {
68
+ name: "name",
69
+ type: "string",
70
+ description: "Processor name set to 'batch-parts'",
71
+ isOptional: false,
72
+ },
73
+ {
74
+ name: "processOutputStream",
75
+ type: "(args: { part: ChunkType; streamParts: ChunkType[]; state: Record<string, any>; abort: (reason?: string) => never }) => Promise<ChunkType | null>",
76
+ description: "Processes streaming output parts to batch them together",
77
+ isOptional: false,
78
+ },
79
+ {
80
+ name: "flush",
81
+ type: "(state?: BatchPartsState) => ChunkType | null",
82
+ description: "Force flush any remaining batched parts when the stream ends",
83
+ isOptional: false,
84
+ },
85
+ ]}
86
+ />
87
+
88
+ ## Extended usage example
89
+
90
+ ```typescript filename="src/mastra/agents/batched-agent.ts" showLineNumbers copy
91
+ import { openai } from "@ai-sdk/openai";
92
+ import { Agent } from "@mastra/core/agent";
93
+ import { BatchPartsProcessor } from "@mastra/core/processors";
94
+
95
+ export const agent = new Agent({
96
+ name: "batched-agent",
97
+ instructions: "You are a helpful assistant",
98
+ model: openai("gpt-4o-mini"),
99
+ outputProcessors: [
100
+ new BatchPartsProcessor({
101
+ batchSize: 5,
102
+ maxWaitTime: 100,
103
+ emitOnNonText: true
104
+ })
105
+ ]
106
+ });
107
+ ```
108
+
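As a further illustration, a configuration that favors time-based flushing over size-based batching; a sketch only, using the same options documented above:

```typescript copy
import { BatchPartsProcessor } from "@mastra/core/processors";

// Emit whatever has accumulated every 50ms, even if fewer than
// 20 parts have been collected, and flush immediately on non-text parts.
const lowLatencyBatcher = new BatchPartsProcessor({
  batchSize: 20,
  maxWaitTime: 50,
  emitOnNonText: true,
});
```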
109
+ ## Related
110
+
111
+ - [Output Processors documentation](/docs/agents/output-processors)
@@ -0,0 +1,154 @@
1
+ ---
2
+ title: "Reference: Language Detector | Processors | Mastra Docs"
3
+ description: "Documentation for the LanguageDetector in Mastra, which detects language and can translate content in AI responses."
4
+ ---
5
+
6
+ # LanguageDetector
7
+
8
+ The `LanguageDetector` is an **input processor** that identifies the language of incoming text and optionally translates it to a target language for consistent processing. It provides flexible strategies for handling multilingual content, including automatic translation so that all content is processed in the target language.
9
+
10
+ ## Usage example
11
+
12
+ ```typescript copy
13
+ import { openai } from "@ai-sdk/openai";
14
+ import { LanguageDetector } from "@mastra/core/processors";
15
+
16
+ const processor = new LanguageDetector({
17
+ model: openai("gpt-4.1-nano"),
18
+ targetLanguages: ["English", "en"],
19
+ threshold: 0.8,
20
+ strategy: "translate"
21
+ });
22
+ ```
23
+
24
+ ## Constructor parameters
25
+
26
+ <PropertiesTable
27
+ content={[
28
+ {
29
+ name: "options",
30
+ type: "Options",
31
+ description: "Configuration options for language detection and translation",
32
+ isOptional: false,
33
+ },
34
+ ]}
35
+ />
36
+
37
+ ### Options
38
+
39
+ <PropertiesTable
40
+ content={[
41
+ {
42
+ name: "model",
43
+ type: "MastraLanguageModel",
44
+ description: "Model configuration for the detection/translation agent",
45
+ isOptional: false,
46
+ },
47
+ {
48
+ name: "targetLanguages",
49
+ type: "string[]",
50
+ description: "Target language(s) for the project. If content is detected in a different language, it may be translated. Can be language name ('English') or ISO code ('en')",
51
+ isOptional: true,
52
+ default: "['English', 'en']",
53
+ },
54
+ {
55
+ name: "threshold",
56
+ type: "number",
57
+ description: "Confidence threshold for language detection (0-1). Only process when detection confidence exceeds this threshold",
58
+ isOptional: true,
59
+ default: "0.7",
60
+ },
61
+ {
62
+ name: "strategy",
63
+ type: "'detect' | 'translate' | 'block' | 'warn'",
64
+ description: "Strategy when non-target language is detected: 'detect' only detects language, 'translate' automatically translates to target language, 'block' rejects content not in target language, 'warn' logs warning but allows through",
65
+ isOptional: true,
66
+ default: "'detect'",
67
+ },
68
+ {
69
+ name: "preserveOriginal",
70
+ type: "boolean",
71
+ description: "Whether to preserve original content in message metadata. Useful for audit trails and debugging",
72
+ isOptional: true,
73
+ default: "true",
74
+ },
75
+ {
76
+ name: "instructions",
77
+ type: "string",
78
+ description: "Custom detection instructions for the agent. If not provided, uses default instructions",
79
+ isOptional: true,
80
+ default: "undefined",
81
+ },
82
+ {
83
+ name: "minTextLength",
84
+ type: "number",
85
+ description: "Minimum text length to perform detection. Short text is often unreliable for language detection",
86
+ isOptional: true,
87
+ default: "10",
88
+ },
89
+ {
90
+ name: "includeDetectionDetails",
91
+ type: "boolean",
92
+ description: "Whether to include detailed detection info in logs",
93
+ isOptional: true,
94
+ default: "false",
95
+ },
96
+ {
97
+ name: "translationQuality",
98
+ type: "'speed' | 'quality' | 'balanced'",
99
+ description: "Translation quality preference: 'speed' prioritizes fast translation, 'quality' prioritizes accuracy, 'balanced' balances between speed and quality",
100
+ isOptional: true,
101
+ default: "'quality'",
102
+ },
103
+ ]}
104
+ />
105
+
106
+ ## Returns
107
+
108
+ <PropertiesTable
109
+ content={[
110
+ {
111
+ name: "name",
112
+ type: "string",
113
+ description: "Processor name set to 'language-detector'",
114
+ isOptional: false,
115
+ },
116
+ {
117
+ name: "processInput",
118
+ type: "(args: { messages: MastraMessageV2[]; abort: (reason?: string) => never; tracingContext?: TracingContext }) => Promise<MastraMessageV2[]>",
119
+ description: "Processes input messages to detect language and optionally translate content before sending to LLM",
120
+ isOptional: false,
121
+ },
122
+ ]}
123
+ />
124
+
125
+ ## Extended usage example
126
+
127
+ ```typescript filename="src/mastra/agents/multilingual-agent.ts" showLineNumbers copy
128
+ import { openai } from "@ai-sdk/openai";
129
+ import { Agent } from "@mastra/core/agent";
130
+ import { LanguageDetector } from "@mastra/core/processors";
131
+
132
+ export const agent = new Agent({
133
+ name: "multilingual-agent",
134
+ instructions: "You are a helpful assistant",
135
+ model: openai("gpt-4o-mini"),
136
+ inputProcessors: [
137
+ new LanguageDetector({
138
+ model: openai("gpt-4.1-nano"),
139
+ targetLanguages: ["English", "en"],
140
+ threshold: 0.8,
141
+ strategy: "translate",
142
+ preserveOriginal: true,
143
+ instructions: "Detect language and translate non-English content to English while preserving original intent",
144
+ minTextLength: 10,
145
+ includeDetectionDetails: true,
146
+ translationQuality: "quality"
147
+ })
148
+ ]
149
+ });
150
+ ```
151
+
152
+ ## Related
153
+
154
+ - [Input Processors](/docs/agents/input-processors)
@@ -0,0 +1,145 @@
1
+ ---
2
+ title: "Reference: Moderation Processor | Processors | Mastra Docs"
3
+ description: "Documentation for the ModerationProcessor in Mastra, which provides content moderation using LLM to detect inappropriate content across multiple categories."
4
+ ---
5
+
6
+ # ModerationProcessor
7
+
8
+ The `ModerationProcessor` is a **hybrid processor** that can be used for both input and output processing. It uses an LLM to detect inappropriate content across multiple categories, helping maintain content safety by evaluating messages against configurable moderation categories with flexible strategies for handling flagged content.
9
+
10
+ ## Usage example
11
+
12
+ ```typescript copy
13
+ import { openai } from "@ai-sdk/openai";
14
+ import { ModerationProcessor } from "@mastra/core/processors";
15
+
16
+ const processor = new ModerationProcessor({
17
+ model: openai("gpt-4.1-nano"),
18
+ threshold: 0.7,
19
+ strategy: "block",
20
+ categories: ["hate", "harassment", "violence"]
21
+ });
22
+ ```
23
+
24
+ ## Constructor parameters
25
+
26
+ <PropertiesTable
27
+ content={[
28
+ {
29
+ name: "options",
30
+ type: "Options",
31
+ description: "Configuration options for content moderation",
32
+ isOptional: false,
33
+ },
34
+ ]}
35
+ />
36
+
37
+ ### Options
38
+
39
+ <PropertiesTable
40
+ content={[
41
+ {
42
+ name: "model",
43
+ type: "MastraLanguageModel",
44
+ description: "Model configuration for the moderation agent",
45
+ isOptional: false,
46
+ },
47
+ {
48
+ name: "categories",
49
+ type: "string[]",
50
+ description: "Categories to check for moderation. If not specified, uses default OpenAI categories",
51
+ isOptional: true,
52
+ default: "['hate', 'hate/threatening', 'harassment', 'harassment/threatening', 'self-harm', 'self-harm/intent', 'self-harm/instructions', 'sexual', 'sexual/minors', 'violence', 'violence/graphic']",
53
+ },
54
+ {
55
+ name: "threshold",
56
+ type: "number",
57
+ description: "Confidence threshold for flagging (0-1). Content is flagged if any category score exceeds this threshold",
58
+ isOptional: true,
59
+ default: "0.5",
60
+ },
61
+ {
62
+ name: "strategy",
63
+ type: "'block' | 'warn' | 'filter'",
64
+ description: "Strategy when content is flagged: 'block' rejects with error, 'warn' logs warning but allows through, 'filter' removes flagged messages",
65
+ isOptional: true,
66
+ default: "'block'",
67
+ },
68
+ {
69
+ name: "instructions",
70
+ type: "string",
71
+ description: "Custom moderation instructions for the agent. If not provided, uses default instructions based on categories",
72
+ isOptional: true,
73
+ default: "undefined",
74
+ },
75
+ {
76
+ name: "includeScores",
77
+ type: "boolean",
78
+ description: "Whether to include confidence scores in logs. Useful for tuning thresholds and debugging",
79
+ isOptional: true,
80
+ default: "false",
81
+ },
82
+ {
83
+ name: "chunkWindow",
84
+ type: "number",
85
+ description: "Number of previous chunks to include for context when moderating stream chunks. If set to 1, includes the previous part, etc.",
86
+ isOptional: true,
87
+ default: "0 (no context window)",
88
+ },
89
+ ]}
90
+ />
91
+
92
+ ## Returns
93
+
94
+ <PropertiesTable
95
+ content={[
96
+ {
97
+ name: "name",
98
+ type: "string",
99
+ description: "Processor name set to 'moderation'",
100
+ isOptional: false,
101
+ },
102
+ {
103
+ name: "processInput",
104
+ type: "(args: { messages: MastraMessageV2[]; abort: (reason?: string) => never; tracingContext?: TracingContext }) => Promise<MastraMessageV2[]>",
105
+ description: "Processes input messages to moderate content before sending to LLM",
106
+ isOptional: false,
107
+ },
108
+ {
109
+ name: "processOutputStream",
110
+ type: "(args: { part: ChunkType; streamParts: ChunkType[]; state: Record<string, any>; abort: (reason?: string) => never; tracingContext?: TracingContext }) => Promise<ChunkType | null | undefined>",
111
+ description: "Processes streaming output parts to moderate content during streaming",
112
+ isOptional: false,
113
+ },
114
+ ]}
115
+ />
116
+
117
+ ## Extended usage example
118
+
119
+ ```typescript filename="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
120
+ import { openai } from "@ai-sdk/openai";
121
+ import { Agent } from "@mastra/core/agent";
122
+ import { ModerationProcessor } from "@mastra/core/processors";
123
+
124
+ export const agent = new Agent({
125
+ name: "moderated-agent",
126
+ instructions: "You are a helpful assistant",
127
+ model: openai("gpt-4o-mini"),
128
+ inputProcessors: [
129
+ new ModerationProcessor({
130
+ model: openai("gpt-4.1-nano"),
131
+ categories: ["hate", "harassment", "violence"],
132
+ threshold: 0.7,
133
+ strategy: "block",
134
+ instructions: "Detect and flag inappropriate content in user messages",
135
+ includeScores: true,
136
+ chunkWindow: 1
137
+ })
138
+ ]
139
+ });
140
+ ```
141
+
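Because this is a hybrid processor, the same configuration can also be applied to model output; a minimal sketch using the `outputProcessors` option shown in the BatchPartsProcessor example:

```typescript copy
import { openai } from "@ai-sdk/openai";
import { Agent } from "@mastra/core/agent";
import { ModerationProcessor } from "@mastra/core/processors";

// Moderate the agent's streamed output in addition to (or instead of) its input.
export const moderatedOutputAgent = new Agent({
  name: "moderated-output-agent",
  instructions: "You are a helpful assistant",
  model: openai("gpt-4o-mini"),
  outputProcessors: [
    new ModerationProcessor({
      model: openai("gpt-4.1-nano"),
      threshold: 0.7,
      strategy: "warn",
    }),
  ],
});
```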
142
+ ## Related
143
+
144
+ - [Input Processors](/docs/agents/input-processors)
145
+ - [Output Processors](/docs/agents/output-processors)