@mastra/mcp-docs-server 0.13.28-alpha.0 → 0.13.28-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +21 -21
  2. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +18 -0
  3. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +11 -11
  4. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +26 -26
  5. package/.docs/organized/changelogs/%40mastra%2Fcore.md +53 -53
  6. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +29 -29
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +11 -11
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +11 -11
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +11 -11
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +41 -41
  11. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +11 -11
  12. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +25 -25
  13. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +10 -10
  14. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +21 -21
  15. package/.docs/organized/changelogs/%40mastra%2Fpg.md +10 -10
  16. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +52 -52
  17. package/.docs/organized/changelogs/%40mastra%2Freact.md +29 -0
  18. package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
  19. package/.docs/organized/changelogs/%40mastra%2Fserver.md +28 -28
  20. package/.docs/organized/changelogs/create-mastra.md +21 -21
  21. package/.docs/organized/changelogs/mastra.md +42 -42
  22. package/.docs/organized/code-examples/agent.md +2 -1
  23. package/.docs/organized/code-examples/heads-up-game.md +5 -5
  24. package/.docs/raw/agents/guardrails.mdx +335 -0
  25. package/.docs/raw/{networks-vnext/complex-task-execution.mdx → agents/networks.mdx} +29 -9
  26. package/.docs/raw/agents/overview.mdx +107 -63
  27. package/.docs/raw/frameworks/agentic-uis/assistant-ui.mdx +9 -2
  28. package/.docs/raw/getting-started/mcp-docs-server.mdx +84 -179
  29. package/.docs/raw/reference/agents/network.mdx +1 -1
  30. package/.docs/raw/reference/cli/create-mastra.mdx +61 -5
  31. package/.docs/raw/reference/cli/mastra.mdx +252 -0
  32. package/.docs/raw/reference/client-js/agents.mdx +1 -10
  33. package/.docs/raw/reference/processors/batch-parts-processor.mdx +111 -0
  34. package/.docs/raw/reference/processors/language-detector.mdx +154 -0
  35. package/.docs/raw/reference/processors/moderation-processor.mdx +145 -0
  36. package/.docs/raw/reference/processors/pii-detector.mdx +153 -0
  37. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +130 -0
  38. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +145 -0
  39. package/.docs/raw/reference/processors/token-limiter-processor.mdx +136 -0
  40. package/.docs/raw/reference/processors/unicode-normalizer.mdx +114 -0
  41. package/.docs/raw/reference/streaming/workflows/resumeStreamVNext.mdx +1 -1
  42. package/.docs/raw/reference/streaming/workflows/stream.mdx +1 -1
  43. package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +1 -1
  44. package/.docs/raw/reference/workflows/run-methods/resume.mdx +17 -1
  45. package/.docs/raw/reference/workflows/run-methods/start.mdx +17 -1
  46. package/.docs/raw/reference/workflows/step.mdx +11 -0
  47. package/.docs/raw/reference/workflows/workflow.mdx +7 -1
  48. package/.docs/raw/server-db/local-dev-playground.mdx +1 -1
  49. package/CHANGELOG.md +24 -0
  50. package/package.json +5 -5
  51. package/.docs/raw/agents/input-processors.mdx +0 -284
  52. package/.docs/raw/agents/output-processors.mdx +0 -328
  53. package/.docs/raw/networks-vnext/overview.mdx +0 -85
  54. package/.docs/raw/networks-vnext/single-task-execution.mdx +0 -135
  55. package/.docs/raw/reference/cli/build.mdx +0 -115
  56. package/.docs/raw/reference/cli/dev.mdx +0 -249
  57. package/.docs/raw/reference/cli/init.mdx +0 -97
  58. package/.docs/raw/reference/cli/lint.mdx +0 -56
  59. package/.docs/raw/reference/cli/mcp-docs-server.mdx +0 -82
  60. package/.docs/raw/reference/cli/scorers.mdx +0 -160
  61. package/.docs/raw/reference/cli/start.mdx +0 -50
@@ -96,7 +96,7 @@ const stream = run.streamVNext({
96
96
  },
97
97
  {
98
98
  name: "stream.result",
99
- type: "Promise<WorkflowResult<TOutput, TSteps>>",
99
+ type: "Promise<WorkflowResult<TState, TOutput, TSteps>>",
100
100
  description: "A promise that resolves to the final workflow result",
101
101
  },
102
102
  {
@@ -80,6 +80,22 @@ if (result.status === "suspended") {
80
80
  }
81
81
  ]
82
82
  },
83
+ {
84
+ name: "outputOptions",
85
+ type: "OutputOptions",
86
+ isOptional: true,
87
+ description: "Options for configuring the workflow result output.",
88
+ properties: [
89
+ {
90
+ parameters: [{
91
+ name: "includeState",
92
+ type: "boolean",
93
+ isOptional: true,
94
+ description: "Whether to include the workflow run state in the result."
95
+ }]
96
+ }
97
+ ]
98
+ },
83
99
  ]}
84
100
  />
85
101
 
@@ -89,7 +105,7 @@ if (result.status === "suspended") {
89
105
  content={[
90
106
  {
91
107
  name: "result",
92
- type: "Promise<WorkflowResult<TOutput, TSteps>>",
108
+ type: "Promise<WorkflowResult<TState, TOutput, TSteps>>",
93
109
  description: "A promise that resolves to the workflow execution result containing step outputs and status",
94
110
  },
95
111
  {
@@ -73,6 +73,22 @@ const result = await run.start({
73
73
  }
74
74
  ]
75
75
  },
76
+ {
77
+ name: "outputOptions",
78
+ type: "OutputOptions",
79
+ isOptional: true,
80
+ description: "Options for configuring the workflow result output.",
81
+ properties: [
82
+ {
83
+ parameters: [{
84
+ name: "includeState",
85
+ type: "boolean",
86
+ isOptional: true,
87
+ description: "Whether to include the workflow run state in the result."
88
+ }]
89
+ }
90
+ ]
91
+ },
76
92
  ]}
77
93
  />
78
94
 
@@ -82,7 +98,7 @@ const result = await run.start({
82
98
  content={[
83
99
  {
84
100
  name: "result",
85
- type: "Promise<WorkflowResult<TOutput, TSteps>>",
101
+ type: "Promise<WorkflowResult<TState, TOutput, TSteps>>",
86
102
  description: "A promise that resolves to the workflow execution result containing step outputs and status",
87
103
  },
88
104
  {
@@ -72,6 +72,12 @@ const step1 = createStep({
72
72
  description: "Optional Zod schema for suspending the step",
73
73
  required: false,
74
74
  },
75
+ {
76
+ name: "stateSchema",
77
+ type: "z.ZodObject<any>",
78
+ description: "Optional Zod schema for the step state. Automatically injected when using Mastra's state system. The stateSchema must be a subset of the workflow's stateSchema. If not specified, type is 'any'.",
79
+ required: false,
80
+ },
75
81
  {
76
82
  name: "execute",
77
83
  type: "(params: ExecuteParams) => Promise<any>",
@@ -117,6 +123,11 @@ const step1 = createStep({
117
123
  type: "() => Promise<void>",
118
124
  description: "Function to pause workflow execution",
119
125
  },
126
+ {
127
+ name: "setState",
128
+ type: "(state: z.infer<TState>) => void",
129
+ description: "Function to set the state of the workflow. Use a reducer-like pattern, such as 'setState({ ...state, ...newState })'",
130
+ },
120
131
  {
121
132
  name: "runId",
122
133
  type: "string",
@@ -42,7 +42,13 @@ export const workflow = createWorkflow({
42
42
  name: "outputSchema",
43
43
  type: "z.ZodType<any>",
44
44
  description: "Zod schema defining the output structure for the workflow",
45
- }
45
+ },
46
+ {
47
+ name: "stateSchema",
48
+ type: "z.ZodObject<any>",
49
+ description: "Optional Zod schema for the workflow state. Automatically injected when using Mastra's state system. If not specified, type is 'any'.",
50
+ isOptional: true,
51
+ },
46
52
  ]}
47
53
  />
48
54
 
@@ -118,7 +118,7 @@ Key features:
118
118
 
119
119
  The local development server exposes a set of REST API routes via the [Mastra Server](/docs/deployment/server-deployment), allowing you to test and interact with your agents and workflows before deployment.
120
120
 
121
- For a full overview of available API routes, including agents, tools, and workflows, see the [Routes reference](/reference/cli/dev#routes).
121
+ For a full overview of available API routes, including agents, tools, and workflows, visit [http://localhost:4111/swagger-ui](http://localhost:4111/swagger-ui) during `mastra dev`.
122
122
 
123
123
  ## OpenAPI Specification
124
124
 
package/CHANGELOG.md CHANGED
@@ -1,5 +1,29 @@
1
1
  # @mastra/mcp-docs-server
2
2
 
3
+ ## 0.13.28-alpha.3
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [[`a6d69c5`](https://github.com/mastra-ai/mastra/commit/a6d69c5fb50c0875b46275811fece5862f03c6a0), [`84199af`](https://github.com/mastra-ai/mastra/commit/84199af8673f6f9cb59286ffb5477a41932775de), [`7f431af`](https://github.com/mastra-ai/mastra/commit/7f431afd586b7d3265075e73106eb73167edbb86)]:
8
+ - @mastra/core@0.20.1-alpha.3
9
+
10
+ ## 0.13.28-alpha.2
11
+
12
+ ### Patch Changes
13
+
14
+ - Updated dependencies [[`ee9108f`](https://github.com/mastra-ai/mastra/commit/ee9108fa29bb8368fc23df158c9f0645b2d7b65c)]:
15
+ - @mastra/core@0.20.1-alpha.2
16
+
17
+ ## 0.13.28-alpha.1
18
+
19
+ ### Patch Changes
20
+
21
+ - Mutable shared workflow run state ([#8545](https://github.com/mastra-ai/mastra/pull/8545))
22
+
23
+ - Updated dependencies [[`c621613`](https://github.com/mastra-ai/mastra/commit/c621613069173c69eb2c3ef19a5308894c6549f0), [`12b1189`](https://github.com/mastra-ai/mastra/commit/12b118942445e4de0dd916c593e33ec78dc3bc73), [`4783b30`](https://github.com/mastra-ai/mastra/commit/4783b3063efea887825514b783ba27f67912c26d), [`076b092`](https://github.com/mastra-ai/mastra/commit/076b0924902ff0f49d5712d2df24c4cca683713f), [`2aee9e7`](https://github.com/mastra-ai/mastra/commit/2aee9e7d188b8b256a4ddc203ccefb366b4867fa), [`c582906`](https://github.com/mastra-ai/mastra/commit/c5829065a346260f96c4beb8af131b94804ae3ad), [`fa2eb96`](https://github.com/mastra-ai/mastra/commit/fa2eb96af16c7d433891a73932764960d3235c1d), [`4783b30`](https://github.com/mastra-ai/mastra/commit/4783b3063efea887825514b783ba27f67912c26d), [`a739d0c`](https://github.com/mastra-ai/mastra/commit/a739d0c8b37cd89569e04a6ca0827083c6167e19), [`603e927`](https://github.com/mastra-ai/mastra/commit/603e9279db8bf8a46caf83881c6b7389ccffff7e), [`cd45982`](https://github.com/mastra-ai/mastra/commit/cd4598291cda128a88738734ae6cbef076ebdebd), [`874f74d`](https://github.com/mastra-ai/mastra/commit/874f74da4b1acf6517f18132d035612c3ecc394a), [`0baf2ba`](https://github.com/mastra-ai/mastra/commit/0baf2bab8420277072ef1f95df5ea7b0a2f61fe7), [`26e968d`](https://github.com/mastra-ai/mastra/commit/26e968db2171ded9e4d47aa1b4f19e1e771158d0), [`cbd3fb6`](https://github.com/mastra-ai/mastra/commit/cbd3fb65adb03a7c0df193cb998aed5ac56675ee)]:
24
+ - @mastra/core@0.20.1-alpha.1
25
+ - @mastra/mcp@0.13.4-alpha.0
26
+
3
27
  ## 0.13.28-alpha.0
4
28
 
5
29
  ### Patch Changes
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mastra/mcp-docs-server",
3
- "version": "0.13.28-alpha.0",
3
+ "version": "0.13.28-alpha.3",
4
4
  "description": "MCP server for accessing Mastra.ai documentation, changelogs, and news.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -33,8 +33,8 @@
33
33
  "uuid": "^11.1.0",
34
34
  "zod": "^3.25.76",
35
35
  "zod-to-json-schema": "^3.24.6",
36
- "@mastra/core": "0.20.1-alpha.0",
37
- "@mastra/mcp": "^0.13.3"
36
+ "@mastra/core": "0.20.1-alpha.3",
37
+ "@mastra/mcp": "^0.13.4-alpha.0"
38
38
  },
39
39
  "devDependencies": {
40
40
  "@hono/node-server": "^1.19.5",
@@ -49,8 +49,8 @@
49
49
  "tsx": "^4.19.4",
50
50
  "typescript": "^5.8.3",
51
51
  "vitest": "^3.2.4",
52
- "@mastra/core": "0.20.1-alpha.0",
53
- "@internal/lint": "0.0.46"
52
+ "@internal/lint": "0.0.46",
53
+ "@mastra/core": "0.20.1-alpha.3"
54
54
  },
55
55
  "homepage": "https://mastra.ai",
56
56
  "repository": {
@@ -1,284 +0,0 @@
1
- ---
2
- title: "Input Processors"
3
- description: "Learn how to use input processors to intercept and modify agent messages before they reach the language model."
4
- ---
5
-
6
- # Input Processors
7
-
8
- Input Processors allow you to intercept, modify, validate, or filter messages _before_ they are sent to the language model. This is useful for implementing guardrails, content moderation, message transformation, and security controls.
9
-
10
- Processors operate on the messages in your conversation thread. They can modify, filter, or validate content, and even abort the request entirely if certain conditions are met.
11
-
12
- ## Built-in Processors
13
-
14
- Mastra provides several built-in processors for common use cases:
15
-
16
- ### `UnicodeNormalizer`
17
-
18
- This processor normalizes Unicode text to ensure consistent formatting and remove potentially problematic characters.
19
-
20
- ```typescript copy showLineNumbers {9-11}
21
- import { Agent } from "@mastra/core/agent";
22
- import { UnicodeNormalizer } from "@mastra/core/processors";
23
- import { openai } from "@ai-sdk/openai";
24
-
25
- const agent = new Agent({
26
- name: 'normalized-agent',
27
- instructions: 'You are a helpful assistant',
28
- model: openai("gpt-4o"),
29
- inputProcessors: [
30
- new UnicodeNormalizer({
31
- stripControlChars: true,
32
- collapseWhitespace: true,
33
- }),
34
- ],
35
- });
36
- ```
37
-
38
- Available options:
39
- - `stripControlChars`: Remove control characters (default: false)
40
- - `preserveEmojis`: Keep emojis intact (default: true)
41
- - `collapseWhitespace`: Collapse multiple spaces/newlines (default: true)
42
- - `trim`: Remove leading/trailing whitespace (default: true)
43
-
44
- ### `ModerationProcessor`
45
-
46
- This processor provides content moderation using an LLM to detect inappropriate content across multiple categories.
47
-
48
- ```typescript copy showLineNumbers {5-13}
49
- import { ModerationProcessor } from "@mastra/core/processors";
50
-
51
- const agent = new Agent({
52
- inputProcessors: [
53
- new ModerationProcessor({
54
- model: openai("gpt-4.1-nano"), // Use a fast, cost-effective model
55
- threshold: 0.7, // Confidence threshold for flagging
56
- strategy: 'block', // Block flagged content
57
- categories: ['hate', 'harassment', 'violence'], // Custom categories
58
- }),
59
- ],
60
- });
61
- ```
62
-
63
- Available options:
64
- - `model`: Language model for moderation analysis (required)
65
- - `categories`: Array of categories to check (default: ['hate','hate/threatening','harassment','harassment/threatening','self-harm','self-harm/intent','self-harm/instructions','sexual','sexual/minors','violence','violence/graphic'])
66
- - `threshold`: Confidence threshold for flagging (0-1, default: 0.5)
67
- - `strategy`: Action when content is flagged (default: 'block')
68
- - `customInstructions`: Custom instructions for the moderation agent
69
-
70
- Strategies available:
71
- - `block`: Reject the request with an error (default)
72
- - `warn`: Log warning but allow content through
73
- - `filter`: Remove flagged messages but continue processing
74
-
75
- ### `PromptInjectionDetector`
76
-
77
- This processor detects and prevents prompt injection attacks, jailbreaks, and system manipulation attempts.
78
-
79
- ```typescript copy showLineNumbers {5-12}
80
- import { PromptInjectionDetector } from "@mastra/core/processors";
81
-
82
- const agent = new Agent({
83
- inputProcessors: [
84
- new PromptInjectionDetector({
85
- model: openai("gpt-4.1-nano"),
86
- threshold: 0.8, // Higher threshold for fewer false positives
87
- strategy: 'rewrite', // Attempt to neutralize while preserving intent
88
- detectionTypes: ['injection', 'jailbreak', 'system-override'],
89
- }),
90
- ],
91
- });
92
- ```
93
-
94
- Available options:
95
- - `model`: Language model for injection detection (required)
96
- - `detectionTypes`: Array of injection types to detect (default: ['injection', 'jailbreak', 'system-override'])
97
- - `threshold`: Confidence threshold for flagging (0-1, default: 0.7)
98
- - `strategy`: Action when injection is detected (default: 'block')
99
- - `instructions`: Custom detection instructions for the agent
100
- - `includeScores`: Whether to include confidence scores in logs (default: false)
101
-
102
- Strategies available:
103
- - `block`: Reject the request (default)
104
- - `warn`: Log warning but allow through
105
- - `filter`: Remove flagged messages
106
- - `rewrite`: Attempt to neutralize the injection while preserving legitimate intent
107
-
108
- ### `PIIDetector`
109
-
110
- This processor detects and optionally redacts personally identifiable information (PII) from messages.
111
-
112
- ```typescript copy showLineNumbers {5-14}
113
- import { PIIDetector } from "@mastra/core/processors";
114
-
115
- const agent = new Agent({
116
- inputProcessors: [
117
- new PIIDetector({
118
- model: openai("gpt-4.1-nano"),
119
- threshold: 0.6,
120
- strategy: 'redact', // Automatically redact detected PII
121
- detectionTypes: ['email', 'phone', 'credit-card', 'ssn', 'api-key', 'crypto-wallet', 'iban'],
122
- redactionMethod: 'mask', // Preserve format while masking
123
- preserveFormat: true, // Keep original structure in redacted values
124
- includeDetections: true, // Log details for compliance auditing
125
- }),
126
- ],
127
- });
128
- ```
129
-
130
- Available options:
131
- - `model`: Language model for PII detection (required)
132
- - `detectionTypes`: Array of PII types to detect (default: ['email', 'phone', 'credit-card', 'ssn', 'api-key', 'ip-address', 'name', 'address', 'date-of-birth', 'url', 'uuid', 'crypto-wallet', 'iban'])
133
- - `threshold`: Confidence threshold for flagging (0-1, default: 0.6)
134
- - `strategy`: Action when PII is detected (default: 'block')
135
- - `redactionMethod`: How to redact PII ('mask', 'hash', 'remove', 'placeholder', default: 'mask')
136
- - `preserveFormat`: Maintain PII structure during redaction (default: true)
137
- - `includeDetections`: Include detection details in logs for compliance (default: false)
138
- - `instructions`: Custom detection instructions for the agent
139
-
140
- Strategies available:
141
- - `block`: Reject requests containing PII (default)
142
- - `warn`: Log warning but allow through
143
- - `filter`: Remove messages containing PII
144
- - `redact`: Replace PII with placeholder values
145
-
146
- ### `LanguageDetector`
147
-
148
- This processor detects the language of incoming messages and can automatically translate them to a target language.
149
-
150
- ```typescript copy showLineNumbers {5-12}
151
- import { LanguageDetector } from "@mastra/core/processors";
152
-
153
- const agent = new Agent({
154
- inputProcessors: [
155
- new LanguageDetector({
156
- model: openai("gpt-4o-mini"),
157
- targetLanguages: ['English', 'en'], // Accept English content
158
- strategy: 'translate', // Auto-translate non-English content
159
- threshold: 0.8, // High confidence threshold
160
- }),
161
- ],
162
- });
163
- ```
164
-
165
- Available options:
166
- - `model`: Language model for detection and translation (required)
167
- - `targetLanguages`: Array of target languages (language names or ISO codes)
168
- - `threshold`: Confidence threshold for language detection (0-1, default: 0.7)
169
- - `strategy`: Action when non-target language is detected (default: 'detect')
170
- - `preserveOriginal`: Keep original content in metadata (default: true)
171
- - `instructions`: Custom detection instructions for the agent
172
-
173
- Strategies available:
174
- - `detect`: Only detect language, don't translate (default)
175
- - `translate`: Automatically translate to target language
176
- - `block`: Reject content not in target language
177
- - `warn`: Log warning but allow content through
178
-
179
- ## Applying Multiple Processors
180
-
181
- You can chain multiple processors. They execute sequentially in the order they appear in the `inputProcessors` array. The output of one processor becomes the input for the next.
182
-
183
- **Order matters!** Generally, it's best practice to place text normalization first, security checks next, and content modification last.
184
-
185
- ```typescript copy showLineNumbers {9-18}
186
- import { Agent } from "@mastra/core/agent";
187
- import {
188
- UnicodeNormalizer,
189
- ModerationProcessor,
190
- PromptInjectionDetector,
191
- PIIDetector
192
- } from "@mastra/core/processors";
193
-
194
- const secureAgent = new Agent({
195
- inputProcessors: [
196
- // 1. Normalize text first
197
- new UnicodeNormalizer({ stripControlChars: true }),
198
- // 2. Check for security threats
199
- new PromptInjectionDetector({ model: openai("gpt-4.1-nano") }),
200
- // 3. Moderate content
201
- new ModerationProcessor({ model: openai("gpt-4.1-nano") }),
202
- // 4. Handle PII last
203
- new PIIDetector({ model: openai("gpt-4.1-nano"), strategy: 'redact' }),
204
- ],
205
- });
206
- ```
207
-
208
- ## Creating Custom Processors
209
-
210
- You can create custom processors by implementing the `Processor` interface. A Processor can be used for input processing when it implements the `processInput` method.
211
-
212
- ```typescript copy showLineNumbers {5,10-33,39}
213
- import type { Processor } from "@mastra/core/processors";
214
- import type { MastraMessageV2 } from "@mastra/core/agent/message-list";
215
- import { TripWire } from "@mastra/core/agent";
216
-
217
- class MessageLengthLimiter implements Processor {
218
- readonly name = 'message-length-limiter';
219
-
220
- constructor(private maxLength: number = 1000) {}
221
-
222
- processInput({ messages, abort }: {
223
- messages: MastraMessageV2[];
224
- abort: (reason?: string) => never
225
- }): MastraMessageV2[] {
226
- // Check total message length
227
- try {
228
- const totalLength = messages.reduce((sum, msg) => {
229
- return sum + msg.content.parts
230
- .filter(part => part.type === 'text')
231
- .reduce((partSum, part) => partSum + (part as any).text.length, 0);
232
- }, 0);
233
-
234
- if (totalLength > this.maxLength) {
235
- abort(`Message too long: ${totalLength} characters (max: ${this.maxLength})`); // throws a TripWire error
236
- }
237
- } catch (error) {
238
- if (error instanceof TripWire) {
239
- throw error; // Re-throw tripwire errors
240
- }
241
- throw new Error(`Length validation failed: ${error instanceof Error ? error.message : 'Unknown error'}`); // application level throw a standard error
242
- }
243
-
244
- return messages;
245
- }
246
- }
247
-
248
- // Use the custom processor
249
- const agent = new Agent({
250
- inputProcessors: [
251
- new MessageLengthLimiter(2000), // Limit to 2000 characters
252
- ],
253
- });
254
- ```
255
-
256
- When creating custom processors:
257
- - Always return the `messages` array (potentially modified)
258
- - Use `abort(reason)` to terminate processing early. Abort is used to simulate blocking a message. errors thrown with `abort` will be an instance of TripWire. For code/application level errors, throw standard errors.
259
- - Mutate the input messages directly, make sure to mutate both the parts and content of a message.
260
- - Keep processors focused on a single responsibility
261
- - If using an agent inside your processor, use a fast model, limit the size of the response from it as much as possible (every token slows down the response exponentially), and make the system prompt as concise as possible, these are both latency bottlenecks.
262
-
263
- ## Integration with Agent Methods
264
-
265
- Input processors work with the `generate()` and `stream()` methods. The entire processor pipeline completes before the agent begins generating or streaming a response.
266
-
267
- ```typescript copy showLineNumbers
268
- // Processors run before generate()
269
- const result = await agent.generate('Hello');
270
-
271
- // Processors also run before stream()
272
- const stream = await agent.stream('Hello');
273
- for await (const chunk of stream) {
274
- console.log(chunk);
275
- }
276
- ```
277
-
278
- If any processor calls `abort()`, the request terminates immediately and subsequent processors are not executed. The agent returns a 200 response with details (`result.tripwireReason`) about why the request was blocked.
279
-
280
- ## Output Processors
281
-
282
- While input processors handle user messages before they reach the language model, **output processors** handle the LLM's responses after generation but before they're returned to the user. This is useful for response validation, content filtering, and safety controls on LLM-generated content.
283
-
284
- See the [Output Processors documentation](/docs/agents/output-processors) for details on processing LLM responses.