@mastra/mcp-docs-server 1.1.17-alpha.1 → 1.1.17-alpha.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31) hide show
  1. package/.docs/docs/memory/observational-memory.md +7 -5
  2. package/.docs/docs/observability/tracing/bridges/otel.md +3 -3
  3. package/.docs/docs/observability/tracing/exporters/sentry.md +1 -1
  4. package/.docs/docs/server/auth/okta.md +225 -0
  5. package/.docs/docs/server/auth.md +1 -0
  6. package/.docs/docs/workspace/lsp.md +116 -0
  7. package/.docs/docs/workspace/overview.md +15 -1
  8. package/.docs/guides/agent-frameworks/ai-sdk.md +3 -3
  9. package/.docs/models/gateways/openrouter.md +2 -1
  10. package/.docs/models/index.md +1 -1
  11. package/.docs/models/providers/zai.md +14 -13
  12. package/.docs/models/providers/zhipuai-coding-plan.md +3 -1
  13. package/.docs/models/providers/zhipuai.md +13 -12
  14. package/.docs/reference/ai-sdk/with-mastra.md +2 -2
  15. package/.docs/reference/auth/okta.md +162 -0
  16. package/.docs/reference/client-js/agents.md +2 -2
  17. package/.docs/reference/evals/noise-sensitivity.md +3 -3
  18. package/.docs/reference/harness/harness-class.md +2 -0
  19. package/.docs/reference/index.md +2 -0
  20. package/.docs/reference/memory/observational-memory.md +2 -2
  21. package/.docs/reference/observability/tracing/interfaces.md +1 -1
  22. package/.docs/reference/processors/message-history-processor.md +1 -1
  23. package/.docs/reference/processors/processor-interface.md +3 -3
  24. package/.docs/reference/processors/semantic-recall-processor.md +1 -1
  25. package/.docs/reference/processors/skill-search-processor.md +93 -0
  26. package/.docs/reference/processors/tool-call-filter.md +2 -2
  27. package/.docs/reference/processors/working-memory-processor.md +1 -1
  28. package/.docs/reference/streaming/agents/stream.md +1 -1
  29. package/.docs/reference/tools/mcp-client.md +1 -1
  30. package/CHANGELOG.md +14 -0
  31. package/package.json +4 -4
@@ -150,17 +150,19 @@ const memory = new Memory({
150
150
  observation: {
151
151
  model: new ModelByInputTokens({
152
152
  upTo: {
153
- 10_000: 'google/gemini-2.5-flash', // Fast and cheap for small inputs
154
- 40_000: 'openai/gpt-4o', // Stronger for medium inputs
155
- 1_000_000: 'openai/gpt-4.5', // Most capable for very large inputs
153
+ // Faster, cheaper models for smaller inputs; stronger models for larger contexts
154
+ 5_000: 'openrouter/mistralai/ministral-8b-2512',
155
+ 20_000: 'openrouter/mistralai/mistral-small-2603',
156
+ 40_000: 'openai/gpt-5.4-mini',
157
+ 1_000_000: 'google/gemini-3.1-flash-lite-preview',
156
158
  },
157
159
  }),
158
160
  },
159
161
  reflection: {
160
162
  model: new ModelByInputTokens({
161
163
  upTo: {
162
- 20_000: 'google/gemini-2.5-flash',
163
- 80_000: 'openai/gpt-4o',
164
+ 20_000: 'openai/gpt-5.4-mini',
165
+ 100_000: 'google/gemini-2.5-flash',
164
166
  },
165
167
  }),
166
168
  },
@@ -151,10 +151,10 @@ With the OtelBridge, your traces maintain proper hierarchy across OTEL and Mastr
151
151
  ```text
152
152
  HTTP POST /api/chat (from Hono middleware)
153
153
  └── agent.assistant (from Mastra via OtelBridge)
154
- ├── chat gpt-4o (LLM call)
154
+ ├── chat gpt-5.4 (LLM call)
155
155
  ├── tool.execute search (tool execution)
156
156
  │ └── HTTP GET api.example.com (from OTEL auto-instrumentation)
157
- └── chat gpt-4o (follow-up LLM call)
157
+ └── chat gpt-5.4 (follow-up LLM call)
158
158
  ```
159
159
 
160
160
  ## Multi-service distributed tracing
@@ -167,7 +167,7 @@ Service A: HTTP POST /api/process
167
167
 
168
168
  Service B: HTTP POST /api/analyze (incoming call - same trace!)
169
169
  └── agent.analyzer (Mastra agent inherits trace context)
170
- └── chat gpt-4o
170
+ └── chat gpt-5.4
171
171
  ```
172
172
 
173
173
  Both services must have:
@@ -162,7 +162,7 @@ The exporter uses standard GenAI semantic conventions with Sentry-specific attri
162
162
  **For MODEL\_GENERATION spans:**
163
163
 
164
164
  - `gen_ai.system`: Model provider (e.g., `openai`, `anthropic`)
165
- - `gen_ai.request.model`: Model identifier (e.g., `gpt-4`)
165
+ - `gen_ai.request.model`: Model identifier (e.g., `gpt-5.4`)
166
166
  - `gen_ai.response.model`: Response model
167
167
  - `gen_ai.response.text`: Output text response
168
168
  - `gen_ai.response.tool_calls`: Tool calls made during generation (JSON array)
@@ -0,0 +1,225 @@
1
+ # Okta
2
+
3
+ The `@mastra/auth-okta` package provides authentication and role-based access control for Mastra using Okta. It supports an OAuth 2.0 / OIDC login flow with encrypted session cookies and maps Okta groups to Mastra permissions.
4
+
5
+ ## Prerequisites
6
+
7
+ This guide uses Okta authentication. Make sure to:
8
+
9
+ 1. Create an Okta account at [okta.com](https://www.okta.com/)
10
+ 2. Set up an OAuth application in the Okta Admin Console (Web app, Authorization Code grant)
11
+ 3. Add your redirect URI to the application's sign-in redirect URIs
12
+ 4. Create an API token (required for RBAC)
13
+
14
+ Make sure your environment variables are set.
15
+
16
+ ```env
17
+ OKTA_DOMAIN=dev-123456.okta.com
18
+ OKTA_CLIENT_ID=your-client-id
19
+ OKTA_CLIENT_SECRET=your-client-secret
20
+ OKTA_REDIRECT_URI=http://localhost:4111/api/auth/callback
21
+ OKTA_COOKIE_PASSWORD=a-random-string-at-least-32-characters-long
22
+ OKTA_API_TOKEN=your-api-token
23
+ ```
24
+
25
+ > **Note:** `OKTA_COOKIE_PASSWORD` encrypts session cookies. If omitted, an auto-generated value is used that does not survive server restarts. Set it explicitly for production.
26
+ >
27
+ > `OKTA_API_TOKEN` is only required when using `MastraRBACOkta` to map Okta groups to permissions.
28
+
29
+ ## Installation
30
+
31
+ **npm**:
32
+
33
+ ```bash
34
+ npm install @mastra/auth-okta
35
+ ```
36
+
37
+ **pnpm**:
38
+
39
+ ```bash
40
+ pnpm add @mastra/auth-okta
41
+ ```
42
+
43
+ **Yarn**:
44
+
45
+ ```bash
46
+ yarn add @mastra/auth-okta
47
+ ```
48
+
49
+ **Bun**:
50
+
51
+ ```bash
52
+ bun add @mastra/auth-okta
53
+ ```
54
+
55
+ ## Usage examples
56
+
57
+ ### Basic usage with environment variables
58
+
59
+ With the environment variables above set, all constructor parameters are optional:
60
+
61
+ ```typescript
62
+ import { Mastra } from '@mastra/core'
63
+ import { MastraAuthOkta } from '@mastra/auth-okta'
64
+
65
+ export const mastra = new Mastra({
66
+ server: {
67
+ auth: new MastraAuthOkta(),
68
+ },
69
+ })
70
+ ```
71
+
72
+ ### Auth with RBAC
73
+
74
+ Add `MastraRBACOkta` to map Okta groups to Mastra permissions:
75
+
76
+ ```typescript
77
+ import { Mastra } from '@mastra/core'
78
+ import { MastraAuthOkta, MastraRBACOkta } from '@mastra/auth-okta'
79
+
80
+ export const mastra = new Mastra({
81
+ server: {
82
+ auth: new MastraAuthOkta(),
83
+ rbac: new MastraRBACOkta({
84
+ roleMapping: {
85
+ Admin: ['*'],
86
+ Engineering: ['agents:*', 'workflows:*', 'tools:*'],
87
+ Viewer: ['agents:read', 'workflows:read'],
88
+ _default: [], // users with unmapped groups get no permissions
89
+ },
90
+ }),
91
+ },
92
+ })
93
+ ```
94
+
95
+ ### Cross-provider usage
96
+
97
+ Use a different auth provider (Auth0, Clerk, etc.) for login and Okta for RBAC. Pass a `getUserId` function to resolve the Okta user ID from the other provider's user object:
98
+
99
+ ```typescript
100
+ import { Mastra } from '@mastra/core'
101
+ import { MastraAuthAuth0 } from '@mastra/auth-auth0'
102
+ import { MastraRBACOkta } from '@mastra/auth-okta'
103
+
104
+ export const mastra = new Mastra({
105
+ server: {
106
+ auth: new MastraAuthAuth0(),
107
+ rbac: new MastraRBACOkta({
108
+ getUserId: user => user.metadata?.oktaUserId || user.email,
109
+ roleMapping: {
110
+ Engineering: ['agents:*', 'workflows:*'],
111
+ Admin: ['*'],
112
+ _default: [],
113
+ },
114
+ }),
115
+ },
116
+ })
117
+ ```
118
+
119
+ > **Note:** To link users between providers, store the Okta user ID in the other provider's user metadata. Mastra uses this ID to fetch groups from Okta.
120
+
121
+ > **Info:** Visit [MastraAuthOkta](https://mastra.ai/reference/auth/okta) for all available configuration options.
122
+
123
+ ## Role mapping
124
+
125
+ The `roleMapping` option maps Okta group names to arrays of Mastra permission strings. Permissions follow a `resource:action` pattern and support wildcards:
126
+
127
+ ```typescript
128
+ const rbac = new MastraRBACOkta({
129
+ roleMapping: {
130
+ // full access to everything
131
+ Admin: ['*'],
132
+
133
+ // full access to agents and workflows
134
+ Engineering: ['agents:*', 'workflows:*'],
135
+
136
+ // read-only access
137
+ Viewer: ['agents:read', 'workflows:read'],
138
+
139
+ // users whose groups don't match any key above
140
+ _default: [],
141
+ },
142
+ })
143
+ ```
144
+
145
+ The `_default` key assigns permissions to users whose Okta groups do not match any other key.
146
+
147
+ ## Client-side setup
148
+
149
+ When auth is enabled, requests to Mastra routes require authentication. `MastraAuthOkta` uses SSO, so users authenticate through Okta's hosted login page. After login, an encrypted session cookie is set automatically.
150
+
151
+ ### Cookie session (recommended)
152
+
153
+ For cross-origin requests (e.g. a frontend on `:3000` calling Mastra on `:4111`), enable CORS credentials on the Mastra server:
154
+
155
+ ```typescript
156
+ export const mastra = new Mastra({
157
+ server: {
158
+ auth: new MastraAuthOkta(),
159
+ cors: {
160
+ origin: 'http://localhost:3000',
161
+ credentials: true,
162
+ },
163
+ },
164
+ })
165
+ ```
166
+
167
+ Configure the client to include credentials:
168
+
169
+ ```typescript
170
+ import { MastraClient } from '@mastra/client-js'
171
+
172
+ export const mastraClient = new MastraClient({
173
+ baseUrl: 'http://localhost:4111',
174
+ credentials: 'include',
175
+ })
176
+ ```
177
+
178
+ ### Bearer token
179
+
180
+ You can also pass an Okta access token as a Bearer token. The token is verified against Okta's JWKS endpoint:
181
+
182
+ ```typescript
183
+ import { MastraClient } from '@mastra/client-js'
184
+
185
+ export const createMastraClient = (accessToken: string) => {
186
+ return new MastraClient({
187
+ baseUrl: 'http://localhost:4111',
188
+ headers: {
189
+ Authorization: `Bearer ${accessToken}`,
190
+ },
191
+ })
192
+ }
193
+ ```
194
+
195
+ > **Info:** Visit [Mastra Client SDK](https://mastra.ai/docs/server/mastra-client) for more configuration options.
196
+
197
+ ### Making authenticated requests
198
+
199
+ **MastraClient**:
200
+
201
+ ```typescript
202
+ import { mastraClient } from '../lib/mastra-client'
203
+
204
+ const agent = mastraClient.getAgent('weatherAgent')
205
+ const response = await agent.generate('Weather in London')
206
+ console.log(response)
207
+ ```
208
+
209
+ **cURL**:
210
+
211
+ ```bash
212
+ curl -X POST http://localhost:4111/api/agents/weatherAgent/generate \
213
+ -H "Content-Type: application/json" \
214
+ -H "Authorization: Bearer <your-okta-access-token>" \
215
+ -d '{
216
+ "messages": "Weather in London"
217
+ }'
218
+ ```
219
+
220
+ ## Troubleshooting
221
+
222
+ - **401 on every request**: Verify your Okta domain, client ID, and client secret are correct. Check that the redirect URI in your Okta application matches `OKTA_REDIRECT_URI`.
223
+ - **Cookies not sent cross-origin**: Set `credentials: "include"` in `MastraClient` and configure `server.cors` with your frontend origin and `credentials: true`.
224
+ - **Session lost on restart**: Set `OKTA_COOKIE_PASSWORD` to a stable value (at least 32 characters). Without it, an auto-generated key is used that changes on each restart.
225
+ - **RBAC returns empty permissions**: Verify `OKTA_API_TOKEN` is set and the token has permission to list user groups. Check that group names in `roleMapping` match your Okta group names exactly.
@@ -30,6 +30,7 @@ See [Custom API Routes](https://mastra.ai/docs/server/custom-api-routes) for con
30
30
  - [Better Auth](https://mastra.ai/docs/server/auth/better-auth)
31
31
  - [Clerk](https://mastra.ai/docs/server/auth/clerk)
32
32
  - [Firebase](https://mastra.ai/docs/server/auth/firebase)
33
+ - [Okta](https://mastra.ai/docs/server/auth/okta)
33
34
  - [Supabase](https://mastra.ai/docs/server/auth/supabase)
34
35
  - [WorkOS](https://mastra.ai/docs/server/auth/workos)
35
36
 
@@ -0,0 +1,116 @@
1
+ # LSP inspection
2
+
3
+ **Added in:** `@mastra/core@1.1.0`
4
+
5
+ LSP inspection gives workspace-backed agents semantic code intelligence. When you enable LSP on a workspace, agents can inspect symbols in supported files to retrieve hover information, jump to definitions, and find implementations.
6
+
7
+ ## When to use LSP inspection
8
+
9
+ Use LSP inspection when your agent needs semantic code understanding instead of plain-text search alone:
10
+
11
+ - Inspect TypeScript or JavaScript symbols and their inferred types
12
+ - Find where a symbol is declared before editing related code
13
+ - Explore implementations across a codebase without manually tracing every file
14
+ - Combine semantic inspection with `view` and `search_content` for faster navigation
15
+
16
+ ## Basic usage
17
+
18
+ Enable LSP on a workspace by setting `lsp: true`:
19
+
20
+ ```typescript
21
+ import { Workspace, LocalFilesystem, LocalSandbox } from '@mastra/core/workspace'
22
+
23
+ const workspace = new Workspace({
24
+ filesystem: new LocalFilesystem({ basePath: './workspace' }),
25
+ sandbox: new LocalSandbox({ workingDirectory: './workspace' }),
26
+ lsp: true,
27
+ })
28
+ ```
29
+
30
+ With this configuration, the workspace registers the default LSP inspection tool alongside the configured filesystem and sandbox tools.
31
+
32
+ ## Agent tool
33
+
34
+ When LSP is enabled, the workspace exposes `mastra_workspace_lsp_inspect` by default.
35
+
36
+ ```json
37
+ {
38
+ "path": "/absolute/path/to/file.ts",
39
+ "line": 10,
40
+ "match": "const foo = <<<bar()"
41
+ }
42
+ ```
43
+
44
+ The `match` field must include exactly one `<<<` cursor marker. The marker identifies the symbol position on the specified line.
45
+
46
+ The tool returns up to four result groups:
47
+
48
+ | Result | Description |
49
+ | ---------------- | ---------------------------------------------------------------- |
50
+ | `hover` | Type information or documentation for the symbol at the cursor |
51
+ | `diagnostics` | Line-scoped LSP diagnostics for the inspected line, when present |
52
+ | `definition` | Declaration locations with a one-line preview |
53
+ | `implementation` | Implementation or usage locations |
54
+
55
+ ## Tool name remapping
56
+
57
+ Rename the tool if your agent expects a shorter name:
58
+
59
+ ```typescript
60
+ import { Workspace, LocalFilesystem, WORKSPACE_TOOLS } from '@mastra/core/workspace'
61
+
62
+ const workspace = new Workspace({
63
+ filesystem: new LocalFilesystem({ basePath: './workspace' }),
64
+ lsp: true,
65
+ tools: {
66
+ [WORKSPACE_TOOLS.LSP.LSP_INSPECT]: {
67
+ name: 'lsp_inspect',
68
+ },
69
+ },
70
+ })
71
+ ```
72
+
73
+ This changes the exposed tool name only. The configuration key stays `WORKSPACE_TOOLS.LSP.LSP_INSPECT`.
74
+
75
+ ## LSP configuration
76
+
77
+ Set `lsp` to `true` for default behavior, or provide an object to customize server startup and diagnostics:
78
+
79
+ ```typescript
80
+ import { Workspace, LocalFilesystem } from '@mastra/core/workspace'
81
+
82
+ const workspace = new Workspace({
83
+ filesystem: new LocalFilesystem({ basePath: './workspace' }),
84
+ lsp: {
85
+ diagnosticTimeout: 4000,
86
+ initTimeout: 8000,
87
+ disableServers: ['pyright'],
88
+ binaryOverrides: {
89
+ typescript: '/custom/path/to/typescript-language-server',
90
+ },
91
+ searchPaths: ['/opt/homebrew/bin'],
92
+ },
93
+ })
94
+ ```
95
+
96
+ Use custom configuration when you need to:
97
+
98
+ - Increase timeouts for large repositories
99
+ - Disable specific language servers
100
+ - Point Mastra at custom language server binaries
101
+ - Add extra binary search paths in constrained environments
102
+
103
+ ## Requirements and limitations
104
+
105
+ - LSP inspection only works for file types with a matching language server
106
+ - The `path` you inspect must resolve inside the workspace filesystem or allowed paths
107
+ - External package inspection may resolve to declaration files such as `.d.ts` instead of runtime source files
108
+ - `lsp_inspect` complements `view` and `search_content`, but does not replace reading implementation code when you need full context
109
+
110
+ ## Related
111
+
112
+ - [Workspace overview](https://mastra.ai/docs/workspace/overview)
113
+ - [Filesystem](https://mastra.ai/docs/workspace/filesystem)
114
+ - [Sandbox](https://mastra.ai/docs/workspace/sandbox)
115
+ - [Search and indexing](https://mastra.ai/docs/workspace/search)
116
+ - [Workspace class reference](https://mastra.ai/reference/workspace/workspace-class)
@@ -8,9 +8,14 @@ A workspace supports the following features:
8
8
 
9
9
  - **[Filesystem](https://mastra.ai/docs/workspace/filesystem)**: File storage (read, write, list, delete, copy, move, grep)
10
10
  - **[Sandbox](https://mastra.ai/docs/workspace/sandbox)**: Command execution (shell commands) and background processes
11
+ - **[LSP inspection](https://mastra.ai/docs/workspace/lsp)**: Hover, definition, and implementation queries through language servers
11
12
  - **[Search](https://mastra.ai/docs/workspace/search)**: BM25, vector, or hybrid search over indexed content
12
13
  - **[Skills](https://mastra.ai/docs/workspace/skills)**: Reusable instructions for agents
13
14
 
15
+ ## When to use workspaces
16
+
17
+ Use a workspace when your agent needs access to the local filesystem, shell commands, semantic code inspection, indexed search, or reusable skill instructions.
18
+
14
19
  ## How it works
15
20
 
16
21
  When you assign a workspace to an agent, Mastra includes the corresponding tools in the agent's toolset. The agent can then use these tools to interact with files and execute commands.
@@ -208,16 +213,24 @@ import { Workspace, LocalFilesystem, LocalSandbox, WORKSPACE_TOOLS } from '@mast
208
213
  const workspace = new Workspace({
209
214
  filesystem: new LocalFilesystem({ basePath: './workspace' }),
210
215
  sandbox: new LocalSandbox({ workingDirectory: './workspace' }),
216
+ lsp: true,
211
217
  tools: {
212
218
  [WORKSPACE_TOOLS.FILESYSTEM.READ_FILE]: { name: 'view' },
213
219
  [WORKSPACE_TOOLS.FILESYSTEM.GREP]: { name: 'search_content' },
214
220
  [WORKSPACE_TOOLS.FILESYSTEM.LIST_FILES]: { name: 'find_files' },
215
221
  [WORKSPACE_TOOLS.SANDBOX.EXECUTE_COMMAND]: { name: 'execute_command' },
222
+ [WORKSPACE_TOOLS.LSP.LSP_INSPECT]: { name: 'lsp_inspect' },
216
223
  },
217
224
  })
218
225
  ```
219
226
 
220
- The agent sees `view`, `search_content`, `find_files`, and `execute_command` instead of the default `mastra_workspace_*` names. Tool names must be unique — duplicate names or conflicts with other default names throw an error.
227
+ The agent sees `view`, `search_content`, `find_files`, `execute_command`, and `lsp_inspect` instead of the default `mastra_workspace_*` names. Tool names must be unique — duplicate names or conflicts with other default names throw an error.
228
+
229
+ ## LSP inspection
230
+
231
+ Enable `lsp` on a workspace to add semantic code inspection through language servers. This adds the `mastra_workspace_lsp_inspect` tool by default, which can return hover information, definition locations, and implementations for a symbol at a specific cursor position.
232
+
233
+ See [LSP inspection](https://mastra.ai/docs/workspace/lsp) for configuration, examples, and tool name remapping.
221
234
 
222
235
  ### Output truncation
223
236
 
@@ -294,6 +307,7 @@ External providers may perform additional setup like establishing connections or
294
307
 
295
308
  - [Filesystem](https://mastra.ai/docs/workspace/filesystem)
296
309
  - [Sandbox](https://mastra.ai/docs/workspace/sandbox)
310
+ - [LSP inspection](https://mastra.ai/docs/workspace/lsp)
297
311
  - [Skills](https://mastra.ai/docs/workspace/skills)
298
312
  - [Search and indexing](https://mastra.ai/docs/workspace/search)
299
313
  - [Workspace class reference](https://mastra.ai/reference/workspace/workspace-class)
@@ -56,7 +56,7 @@ const loggingProcessor: Processor<'logger'> = {
56
56
  },
57
57
  }
58
58
 
59
- const model = withMastra(openai('gpt-4o'), {
59
+ const model = withMastra(openai('gpt-5.4'), {
60
60
  inputProcessors: [loggingProcessor],
61
61
  outputProcessors: [loggingProcessor],
62
62
  })
@@ -85,7 +85,7 @@ await storage.init()
85
85
 
86
86
  const memoryStorage = await storage.getStore('memory')
87
87
 
88
- const model = withMastra(openai('gpt-4o'), {
88
+ const model = withMastra(openai('gpt-5.4'), {
89
89
  memory: {
90
90
  storage: memoryStorage!,
91
91
  threadId: 'user-thread-123',
@@ -115,7 +115,7 @@ await storage.init()
115
115
 
116
116
  const memoryStorage = await storage.getStore('memory')
117
117
 
118
- const model = withMastra(openai('gpt-4o'), {
118
+ const model = withMastra(openai('gpt-5.4'), {
119
119
  inputProcessors: [myGuardProcessor],
120
120
  outputProcessors: [myLoggingProcessor],
121
121
  memory: {
@@ -1,6 +1,6 @@
1
1
  # ![OpenRouter logo](https://models.dev/logos/openrouter.svg)OpenRouter
2
2
 
3
- OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 164 models through Mastra's model router.
3
+ OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 165 models through Mastra's model router.
4
4
 
5
5
  Learn more in the [OpenRouter documentation](https://openrouter.ai/models).
6
6
 
@@ -103,6 +103,7 @@ ANTHROPIC_API_KEY=ant-...
103
103
  | `mistralai/devstral-small-2507` |
104
104
  | `mistralai/mistral-medium-3` |
105
105
  | `mistralai/mistral-medium-3.1` |
106
+ | `mistralai/mistral-small-2603` |
106
107
  | `mistralai/mistral-small-3.1-24b-instruct` |
107
108
  | `mistralai/mistral-small-3.2-24b-instruct` |
108
109
  | `moonshotai/kimi-k2` |
@@ -1,6 +1,6 @@
1
1
  # Model Providers
2
2
 
3
- Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3391 models from 94 providers through a single API.
3
+ Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3396 models from 94 providers through a single API.
4
4
 
5
5
  ## Features
6
6
 
@@ -1,6 +1,6 @@
1
1
  # ![Z.AI logo](https://models.dev/logos/zai.svg)Z.AI
2
2
 
3
- Access 10 Z.AI models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
3
+ Access 11 Z.AI models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Z.AI documentation](https://docs.z.ai/guides/overview/pricing).
6
6
 
@@ -32,18 +32,19 @@ for await (const chunk of stream) {
32
32
 
33
33
  ## Models
34
34
 
35
- | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
- | ------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
- | `zai/glm-4.5` | 131K | | | | | | $0.60 | $2 |
38
- | `zai/glm-4.5-air` | 131K | | | | | | $0.20 | $1 |
39
- | `zai/glm-4.5-flash` | 131K | | | | | | — | — |
40
- | `zai/glm-4.5v` | 64K | | | | | | $0.60 | $2 |
41
- | `zai/glm-4.6` | 205K | | | | | | $0.60 | $2 |
42
- | `zai/glm-4.6v` | 128K | | | | | | $0.30 | $0.90 |
43
- | `zai/glm-4.7` | 205K | | | | | | $0.60 | $2 |
44
- | `zai/glm-4.7-flash` | 200K | | | | | | — | — |
45
- | `zai/glm-5` | 205K | | | | | | $1 | $3 |
46
- | `zai/glm-5-turbo` | 200K | | | | | | $1 | $4 |
35
+ | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
+ | -------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
+ | `zai/glm-4.5` | 131K | | | | | | $0.60 | $2 |
38
+ | `zai/glm-4.5-air` | 131K | | | | | | $0.20 | $1 |
39
+ | `zai/glm-4.5-flash` | 131K | | | | | | — | — |
40
+ | `zai/glm-4.5v` | 64K | | | | | | $0.60 | $2 |
41
+ | `zai/glm-4.6` | 205K | | | | | | $0.60 | $2 |
42
+ | `zai/glm-4.6v` | 128K | | | | | | $0.30 | $0.90 |
43
+ | `zai/glm-4.7` | 205K | | | | | | $0.60 | $2 |
44
+ | `zai/glm-4.7-flash` | 200K | | | | | | — | — |
45
+ | `zai/glm-4.7-flashx` | 200K | | | | | | $0.07 | $0.40 |
46
+ | `zai/glm-5` | 205K | | | | | | $1 | $3 |
47
+ | `zai/glm-5-turbo` | 200K | | | | | | $1 | $4 |
47
48
 
48
49
  ## Advanced configuration
49
50
 
@@ -1,6 +1,6 @@
1
1
  # ![Zhipu AI Coding Plan logo](https://models.dev/logos/zhipuai-coding-plan.svg)Zhipu AI Coding Plan
2
2
 
3
- Access 10 Zhipu AI Coding Plan models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
3
+ Access 12 Zhipu AI Coding Plan models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Zhipu AI Coding Plan documentation](https://docs.bigmodel.cn/cn/coding-plan/overview).
6
6
 
@@ -42,6 +42,8 @@ for await (const chunk of stream) {
42
42
  | `zhipuai-coding-plan/glm-4.6v` | 128K | | | | | | — | — |
43
43
  | `zhipuai-coding-plan/glm-4.6v-flash` | 128K | | | | | | — | — |
44
44
  | `zhipuai-coding-plan/glm-4.7` | 205K | | | | | | — | — |
45
+ | `zhipuai-coding-plan/glm-4.7-flash` | 200K | | | | | | — | — |
46
+ | `zhipuai-coding-plan/glm-4.7-flashx` | 200K | | | | | | $0.07 | $0.40 |
45
47
  | `zhipuai-coding-plan/glm-5` | 205K | | | | | | — | — |
46
48
  | `zhipuai-coding-plan/glm-5-turbo` | 200K | | | | | | — | — |
47
49
 
@@ -1,6 +1,6 @@
1
1
  # ![Zhipu AI logo](https://models.dev/logos/zhipuai.svg)Zhipu AI
2
2
 
3
- Access 9 Zhipu AI models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
3
+ Access 10 Zhipu AI models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Zhipu AI documentation](https://docs.z.ai/guides/overview/pricing).
6
6
 
@@ -32,17 +32,18 @@ for await (const chunk of stream) {
32
32
 
33
33
  ## Models
34
34
 
35
- | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
- | ----------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
- | `zhipuai/glm-4.5` | 131K | | | | | | $0.60 | $2 |
38
- | `zhipuai/glm-4.5-air` | 131K | | | | | | $0.20 | $1 |
39
- | `zhipuai/glm-4.5-flash` | 131K | | | | | | — | — |
40
- | `zhipuai/glm-4.5v` | 64K | | | | | | $0.60 | $2 |
41
- | `zhipuai/glm-4.6` | 205K | | | | | | $0.60 | $2 |
42
- | `zhipuai/glm-4.6v` | 128K | | | | | | $0.30 | $0.90 |
43
- | `zhipuai/glm-4.7` | 205K | | | | | | $0.60 | $2 |
44
- | `zhipuai/glm-4.7-flash` | 200K | | | | | | — | — |
45
- | `zhipuai/glm-5` | 205K | | | | | | $1 | $3 |
35
+ | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
+ | ------------------------ | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
+ | `zhipuai/glm-4.5` | 131K | | | | | | $0.60 | $2 |
38
+ | `zhipuai/glm-4.5-air` | 131K | | | | | | $0.20 | $1 |
39
+ | `zhipuai/glm-4.5-flash` | 131K | | | | | | — | — |
40
+ | `zhipuai/glm-4.5v` | 64K | | | | | | $0.60 | $2 |
41
+ | `zhipuai/glm-4.6` | 205K | | | | | | $0.60 | $2 |
42
+ | `zhipuai/glm-4.6v` | 128K | | | | | | $0.30 | $0.90 |
43
+ | `zhipuai/glm-4.7` | 205K | | | | | | $0.60 | $2 |
44
+ | `zhipuai/glm-4.7-flash` | 200K | | | | | | — | — |
45
+ | `zhipuai/glm-4.7-flashx` | 200K | | | | | | $0.07 | $0.40 |
46
+ | `zhipuai/glm-5` | 205K | | | | | | $1 | $3 |
46
47
 
47
48
  ## Advanced configuration
48
49
 
@@ -18,7 +18,7 @@ const loggingProcessor: Processor<'logger'> = {
18
18
  },
19
19
  }
20
20
 
21
- const model = withMastra(openai('gpt-4o'), {
21
+ const model = withMastra(openai('gpt-5.4'), {
22
22
  inputProcessors: [loggingProcessor],
23
23
  })
24
24
 
@@ -30,7 +30,7 @@ const { text } = await generateText({
30
30
 
31
31
  ## Parameters
32
32
 
33
- **model** (`LanguageModelV2 | LanguageModelV3`): Any AI SDK v5 or v6 language model (e.g., \`openai('gpt-4o')\`, \`anthropic('claude-3-opus')\`).
33
+ **model** (`LanguageModelV2 | LanguageModelV3`): Any AI SDK v5 or v6 language model (e.g., \`openai('gpt-5.4')\`, \`anthropic('claude-opus-4-6')\`).
34
34
 
35
35
  **options** (`WithMastraOptions`): Configuration object for processors and memory.
36
36
 
@@ -0,0 +1,162 @@
1
+ # MastraAuthOkta & MastraRBACOkta class
2
+
3
+ ## MastraAuthOkta class
4
+
5
+ The `MastraAuthOkta` class provides authentication for Mastra using Okta. It implements an OAuth 2.0 / OIDC login flow with encrypted session cookies and integrates with the Mastra server using the `auth` option.
6
+
7
+ ### Usage example
8
+
9
+ ```typescript
10
+ import { Mastra } from '@mastra/core'
11
+ import { MastraAuthOkta } from '@mastra/auth-okta'
12
+
13
+ export const mastra = new Mastra({
14
+ server: {
15
+ auth: new MastraAuthOkta({
16
+ domain: process.env.OKTA_DOMAIN,
17
+ clientId: process.env.OKTA_CLIENT_ID,
18
+ clientSecret: process.env.OKTA_CLIENT_SECRET,
19
+ redirectUri: process.env.OKTA_REDIRECT_URI,
20
+ }),
21
+ },
22
+ })
23
+ ```
24
+
25
+ > **Note:** You can omit the constructor parameters if you have the appropriately named environment variables set. In that case, use `new MastraAuthOkta()` without any arguments.
26
+
27
+ ### Constructor parameters
28
+
29
+ **domain** (`string`): Your Okta domain (e.g., \`dev-123456.okta.com\`). Used to construct the issuer URL and API endpoints. (Default: `process.env.OKTA_DOMAIN`)
30
+
31
+ **clientId** (`string`): The OAuth client ID from your Okta application. (Default: `process.env.OKTA_CLIENT_ID`)
32
+
33
+ **clientSecret** (`string`): The OAuth client secret. Required for the SSO authorization code flow. (Default: `process.env.OKTA_CLIENT_SECRET`)
34
+
35
+ **issuer** (`string`): The token issuer URL. Override this if you use a custom authorization server. (Default: `` `https://{domain}/oauth2/default` ``)
36
+
37
+ **redirectUri** (`string`): The OAuth redirect URI for the SSO callback. Must match the redirect URI configured in your Okta application. (Default: `process.env.OKTA_REDIRECT_URI`)
38
+
39
+ **scopes** (`string[]`): OAuth scopes to request during the login flow. (Default: `['openid', 'profile', 'email', 'groups']`)
40
+
41
+ **apiToken** (`string`): Okta API token for user lookups via the Users API. Required for \`getUser()\` to return user data by ID. (Default: `process.env.OKTA_API_TOKEN`)
42
+
43
+ **session** (`OktaSessionOptions`): Session cookie configuration.
44
+
45
+ **session.cookieName** (`string`): Name of the session cookie.
46
+
47
+ **session.cookieMaxAge** (`number`): Cookie max age in seconds.
48
+
49
+ **session.cookiePassword** (`string`): Password for encrypting session cookies. Must be at least 32 characters. If not set, an auto-generated value is used that does not survive restarts.
50
+
51
+ **session.secureCookies** (`boolean`): Set the \`Secure\` flag on session cookies.
52
+
53
+ **name** (`string`): Custom name for the auth provider instance. (Default: `'okta'`)
54
+
55
+ ### Environment variables
56
+
57
+ The following environment variables are automatically used when constructor options are not provided:
58
+
59
+ **OKTA\_DOMAIN** (`string`): Your Okta domain (e.g., \`dev-123456.okta.com\`). Found in your Okta admin console.
60
+
61
+ **OKTA\_CLIENT\_ID** (`string`): The OAuth client ID from your Okta application.
62
+
63
+ **OKTA\_CLIENT\_SECRET** (`string`): The OAuth client secret from your Okta application.
64
+
65
+ **OKTA\_ISSUER** (`string`): Token issuer URL. Defaults to \`https\://{domain}/oauth2/default\` if not set.
66
+
67
+ **OKTA\_REDIRECT\_URI** (`string`): OAuth redirect URI for the SSO callback.
68
+
69
+ **OKTA\_COOKIE\_PASSWORD** (`string`): Password for encrypting session cookies. Must be at least 32 characters.
70
+
71
+ **OKTA\_API\_TOKEN** (`string`): Okta API token for user lookups and RBAC group resolution.
72
+
73
+ ### Authentication flow
74
+
75
+ `MastraAuthOkta` authenticates requests in the following order:
76
+
77
+ 1. **Session cookie**: Reads the encrypted session cookie and decrypts it. If the session is valid and not expired, the user is authenticated.
78
+ 2. **JWT fallback**: If no session cookie is present, verifies the `Authorization` header token against Okta's JWKS endpoint.
79
+
80
+ After authentication, `authorizeUser` checks that the user has a valid `oktaId`. Provide a custom `authorizeUser` function to implement additional logic.
81
+
82
+ ### `OktaUser` type
83
+
84
+ The `OktaUser` type extends the base `EEUser` interface with Okta-specific fields:
85
+
86
+ **id** (`string`): User identifier (maps to the \`sub\` claim).
87
+
88
+ **oktaId** (`string`): Okta user ID (same as \`id\`).
89
+
90
+ **email** (`string`): User email address.
91
+
92
+ **name** (`string`): User display name, constructed from token claims.
93
+
94
+ **avatarUrl** (`string`): URL to the user's profile picture.
95
+
96
+ **groups** (`string[]`): Okta groups the user belongs to, populated from the \`groups\` claim.
97
+
98
+ ## MastraRBACOkta class
99
+
100
+ The `MastraRBACOkta` class maps Okta groups to Mastra permissions. It fetches user groups from the Okta API and resolves them against a configurable role mapping. Use it with `MastraAuthOkta` or any other auth provider.
101
+
102
+ > **Note:** RBAC requires a valid Enterprise Edition license. It works without a license in development so you can try it locally, but you’ll need a license for production. [Contact sales](https://mastra.ai/contact) for more information.
103
+
104
+ ### Usage example
105
+
106
+ Use `MastraRBACOkta` alongside an auth provider by passing it to the `rbac` option:
107
+
108
+ ```typescript
109
+ import { Mastra } from '@mastra/core'
110
+ import { MastraAuthOkta, MastraRBACOkta } from '@mastra/auth-okta'
111
+
112
+ export const mastra = new Mastra({
113
+ server: {
114
+ auth: new MastraAuthOkta(),
115
+ rbac: new MastraRBACOkta({
116
+ roleMapping: {
117
+ Admin: ['*'],
118
+ Engineering: ['agents:*', 'workflows:*', 'tools:*'],
119
+ Viewer: ['agents:read', 'workflows:read'],
120
+ _default: [],
121
+ },
122
+ }),
123
+ },
124
+ })
125
+ ```
126
+
127
+ To use Okta RBAC with a different auth provider, pass a `getUserId` function to resolve the Okta user ID from the other provider's user object:
128
+
129
+ ```typescript
130
+ import { MastraAuthAuth0 } from '@mastra/auth-auth0'
131
+ import { MastraRBACOkta } from '@mastra/auth-okta'
132
+
133
+ export const mastra = new Mastra({
134
+ server: {
135
+ auth: new MastraAuthAuth0(),
136
+ rbac: new MastraRBACOkta({
137
+ getUserId: user => user.metadata?.oktaUserId || user.email,
138
+ roleMapping: {
139
+ Engineering: ['agents:*', 'workflows:*'],
140
+ Admin: ['*'],
141
+ _default: [],
142
+ },
143
+ }),
144
+ },
145
+ })
146
+ ```
147
+
148
+ ### Constructor parameters
149
+
150
+ **roleMapping** (`RoleMapping`): Maps Okta group names to arrays of Mastra permission strings. Use \`'\_default'\` to assign permissions to users who do not match any group. Supports wildcards like \`'\*'\` (full access) and \`'agents:\*'\` (all agent actions).
151
+
152
+ **domain** (`string`): Your Okta domain. Used to initialize the Okta management SDK. (Default: `process.env.OKTA_DOMAIN`)
153
+
154
+ **apiToken** (`string`): Okta API token for the management SDK. Required to fetch user groups from the Okta API. (Default: `process.env.OKTA_API_TOKEN`)
155
+
156
+ **getUserId** (`(user: unknown) => string | undefined`): Extract the Okta user ID from a user object. Use this when combining Okta RBAC with a different auth provider. If not provided, falls back to \`oktaId\` or \`id\` on the user object.
157
+
158
+ **cache** (`PermissionCacheOptions`): Configure the LRU cache for group lookups.
159
+
160
+ **cache.maxSize** (`number`): Maximum number of users to cache.
161
+
162
+ **cache.ttlMs** (`number`): Time-to-live in milliseconds.
@@ -344,7 +344,7 @@ const agent = await mastraClient.createStoredAgent({
344
344
  instructions: 'You are a helpful assistant.',
345
345
  model: {
346
346
  provider: 'openai',
347
- name: 'gpt-4',
347
+ name: 'gpt-5.4',
348
348
  },
349
349
  })
350
350
  ```
@@ -359,7 +359,7 @@ const agent = await mastraClient.createStoredAgent({
359
359
  instructions: 'You are a helpful assistant.',
360
360
  model: {
361
361
  provider: 'openai',
362
- name: 'gpt-4',
362
+ name: 'gpt-5.4',
363
363
  },
364
364
  tools: ['calculator', 'weather'],
365
365
  workflows: ['data-processing'],
@@ -546,9 +546,9 @@ import { createNoiseSensitivityScorerLLM } from '@mastra/evals'
546
546
 
547
547
  async function compareModelRobustness() {
548
548
  const models = [
549
- { name: 'GPT-5.1', model: 'openai/gpt-5.4' },
550
- { name: 'GPT-4.1', model: 'openai/gpt-4.1' },
551
- { name: 'Claude', model: 'anthropic/claude-sonnet-4-6' },
549
+ { name: 'GPT-5.4', model: 'openai/gpt-5.4' },
550
+ { name: 'GPT-5.4-mini', model: 'openai/gpt-5.4-mini' },
551
+ { name: 'Claude', model: 'anthropic/claude-opus-4-6' },
552
552
  ]
553
553
 
554
554
  const testScenario = {
@@ -94,6 +94,8 @@ await harness.sendMessage({ content: 'Hello!' })
94
94
 
95
95
  **omConfig** (`HarnessOMConfig`): Default configuration for observational memory (observer/reflector model IDs and thresholds).
96
96
 
97
+ **disableBuiltinTools** (`BuiltinToolId[]`): Built-in harness tool IDs to remove from the \`harnessBuiltIn\` toolset. Valid values are \`ask\_user\`, \`submit\_plan\`, \`task\_write\`, \`task\_check\`, and \`subagent\`.
98
+
97
99
  **heartbeatHandlers** (`HeartbeatHandler[]`): Periodic background tasks started during \`init()\`. Use for gateway sync, cache refresh, and similar tasks.
98
100
 
99
101
  **idGenerator** (`() => string`): Custom ID generator for Harness-managed IDs such as threads and mode-run identifiers. (Default: `timestamp + random string`)
@@ -35,6 +35,7 @@ The Reference section provides documentation of Mastra's API, including paramete
35
35
  - [Clerk](https://mastra.ai/reference/auth/clerk)
36
36
  - [Firebase](https://mastra.ai/reference/auth/firebase)
37
37
  - [JSON Web Token](https://mastra.ai/reference/auth/jwt)
38
+ - [Okta](https://mastra.ai/reference/auth/okta)
38
39
  - [Supabase](https://mastra.ai/reference/auth/supabase)
39
40
  - [WorkOS](https://mastra.ai/reference/auth/workos)
40
41
  - [create-mastra](https://mastra.ai/reference/cli/create-mastra)
@@ -152,6 +153,7 @@ The Reference section provides documentation of Mastra's API, including paramete
152
153
  - [Processor Interface](https://mastra.ai/reference/processors/processor-interface)
153
154
  - [PromptInjectionDetector](https://mastra.ai/reference/processors/prompt-injection-detector)
154
155
  - [SemanticRecall](https://mastra.ai/reference/processors/semantic-recall-processor)
156
+ - [SkillSearchProcessor](https://mastra.ai/reference/processors/skill-search-processor)
155
157
  - [SystemPromptScrubber](https://mastra.ai/reference/processors/system-prompt-scrubber)
156
158
  - [TokenLimiterProcessor](https://mastra.ai/reference/processors/token-limiter-processor)
157
159
  - [ToolCallFilter](https://mastra.ai/reference/processors/tool-call-filter)
@@ -632,8 +632,8 @@ import { ModelByInputTokens } from '@mastra/memory'
632
632
  const selector = new ModelByInputTokens({
633
633
  upTo: {
634
634
  10_000: 'google/gemini-2.5-flash', // Fast for small inputs
635
- 40_000: 'openai/gpt-4o', // Stronger for medium inputs
636
- 1_000_000: 'openai/gpt-4.5', // Most capable for large inputs
635
+ 40_000: 'openai/gpt-5.4-mini', // Stronger for medium inputs
636
+ 1_000_000: 'openai/gpt-5.4', // Most capable for large inputs
637
637
  },
638
638
  })
639
639
  ```
@@ -268,7 +268,7 @@ Model Generation attributes.
268
268
 
269
269
  ```typescript
270
270
  interface ModelGenerationAttributes {
271
- /** Model name (e.g., 'gpt-4', 'claude-3') */
271
+ /** Model name (e.g., 'gpt-5.4', 'claude-opus-4-6') */
272
272
  model?: string
273
273
 
274
274
  /** Model provider (e.g., 'openai', 'anthropic') */
@@ -45,7 +45,7 @@ const storage = new PostgresStorage({
45
45
  export const agent = new Agent({
46
46
  name: 'memory-agent',
47
47
  instructions: 'You are a helpful assistant with conversation memory',
48
- model: 'openai:gpt-4o',
48
+ model: 'openai/gpt-5.4',
49
49
  inputProcessors: [
50
50
  new MessageHistory({
51
51
  storage,
@@ -202,9 +202,9 @@ The method can return any combination of these properties:
202
202
  When multiple processors implement `processInputStep`, they run in order and changes chain through:
203
203
 
204
204
  ```text
205
- Processor 1: receives { model: 'gpt-4o' } → returns { model: 'gpt-4o-mini' }
206
- Processor 2: receives { model: 'gpt-4o-mini' } → returns { toolChoice: 'none' }
207
- Final: model = 'gpt-4o-mini', toolChoice = 'none'
205
+ Processor 1: receives { model: 'gpt-5.4' } → returns { model: 'gpt-5.4-mini' }
206
+ Processor 2: receives { model: 'gpt-5.4-mini' } → returns { toolChoice: 'none' }
207
+ Final: model = 'gpt-5.4-mini', toolChoice = 'none'
208
208
  ```
209
209
 
210
210
  #### System message isolation
@@ -82,7 +82,7 @@ const semanticRecall = new SemanticRecall({
82
82
  export const agent = new Agent({
83
83
  name: 'semantic-memory-agent',
84
84
  instructions: 'You are a helpful assistant with semantic memory recall',
85
- model: 'openai:gpt-4o',
85
+ model: 'openai/gpt-5.4',
86
86
  inputProcessors: [semanticRecall, new MessageHistory({ storage, lastMessages: 50 })],
87
87
  outputProcessors: [semanticRecall, new MessageHistory({ storage })],
88
88
  })
@@ -0,0 +1,93 @@
1
+ # SkillSearchProcessor
2
+
3
+ The `SkillSearchProcessor` is an **input processor** that enables on-demand skill discovery and loading. Instead of injecting all skill metadata into the system prompt upfront (like `SkillsProcessor`), it gives the agent two meta-tools (`search_skills` and `load_skill`) that let it find and load skills on demand. This reduces context token usage when workspaces have many skills.
4
+
5
+ ## Usage example
6
+
7
+ ```typescript
8
+ import { SkillSearchProcessor } from '@mastra/core/processors'
9
+
10
+ const skillSearch = new SkillSearchProcessor({
11
+ workspace,
12
+ search: {
13
+ topK: 5,
14
+ minScore: 0.1,
15
+ },
16
+ })
17
+ ```
18
+
19
+ ## Constructor parameters
20
+
21
+ **options** (`SkillSearchProcessorOptions`): Configuration options for the skill search processor
22
+
23
+ **options.workspace** (`Workspace`): Workspace instance containing skills. Skills are accessed via `workspace.skills`.
24
+
25
+ **options.search** (`{ topK?: number; minScore?: number }`): Configuration for the search behavior.
26
+
27
+ **options.search.topK** (`number`): Maximum number of skills to return in search results.
28
+
29
+ **options.search.minScore** (`number`): Minimum relevance score for including a skill in search results.
30
+
31
+ **options.ttl** (`number`): Time-to-live for thread state in milliseconds. After this duration of inactivity, thread state will be cleaned up. Set to 0 to disable cleanup.
32
+
33
+ ## Returns
34
+
35
+ **id** (`string`): Processor identifier set to 'skill-search'
36
+
37
+ **name** (`string`): Processor display name set to 'Skill Search Processor'
38
+
39
+ **processInputStep** (`(args: ProcessInputStepArgs) => Promise<ProcessInputStepResult>`): Processes each step to inject search/load meta-tools and any previously loaded skill instructions as system messages.
40
+
41
+ ## Extended usage example
42
+
43
+ ```typescript
44
+ import { Agent } from '@mastra/core/agent'
45
+ import { SkillSearchProcessor } from '@mastra/core/processors'
46
+ import { Workspace, LocalFilesystem } from '@mastra/core/workspace'
47
+
48
+ const workspace = new Workspace({
49
+ filesystem: new LocalFilesystem({ basePath: './project' }),
50
+ skills: ['/skills'],
51
+ bm25: true,
52
+ })
53
+
54
+ const skillSearch = new SkillSearchProcessor({
55
+ workspace,
56
+ search: {
57
+ topK: 5,
58
+ minScore: 0.1,
59
+ },
60
+ })
61
+
62
+ const agent = new Agent({
63
+ name: 'skill-agent',
64
+ instructions:
65
+ 'You are a helpful assistant. Use search_skills to find relevant skills, then load_skill to load their instructions.',
66
+ model: 'openai/gpt-5.4',
67
+ workspace,
68
+ inputProcessors: [skillSearch],
69
+ })
70
+ ```
71
+
72
+ The agent workflow is:
73
+
74
+ 1. Agent receives a user message
75
+ 2. Agent calls `search_skills` with keywords (e.g., "api design")
76
+ 3. Agent reviews results and calls `load_skill` with the skill name
77
+ 4. The skill's instructions appear as system messages on the next turn
78
+ 5. Agent follows the loaded skill's instructions
79
+
80
+ ## Compared to SkillsProcessor
81
+
82
+ | | SkillsProcessor | SkillSearchProcessor |
83
+ | -------------- | -------------------------- | ---------------------------------- |
84
+ | Scaling | Injects all skills upfront | On-demand discovery |
85
+ | Context usage | Grows with skill count | Constant (only loaded skills) |
86
+ | Agent workflow | Skills always visible | Agent searches and loads as needed |
87
+ | Best for | Few skills (< 10) | Many skills (10+) |
88
+
89
+ ## Related
90
+
91
+ - [ToolSearchProcessor](https://mastra.ai/reference/processors/tool-search-processor)
92
+ - [Processors](https://mastra.ai/docs/agents/processors)
93
+ - [Workspace Skills](https://mastra.ai/docs/workspace/overview)
@@ -39,7 +39,7 @@ import { ToolCallFilter } from '@mastra/core/processors'
39
39
  export const agent = new Agent({
40
40
  name: 'filtered-agent',
41
41
  instructions: 'You are a helpful assistant',
42
- model: 'openai:gpt-4o',
42
+ model: 'openai/gpt-5.4',
43
43
  tools: {
44
44
  searchDatabase,
45
45
  sendEmail,
@@ -64,7 +64,7 @@ import { ToolCallFilter } from '@mastra/core/processors'
64
64
  export const agent = new Agent({
65
65
  name: 'no-tools-context-agent',
66
66
  instructions: 'You are a helpful assistant',
67
- model: 'openai:gpt-4o',
67
+ model: 'openai/gpt-5.4',
68
68
  tools: {
69
69
  searchDatabase,
70
70
  sendEmail,
@@ -67,7 +67,7 @@ const storage = new PostgresStorage({
67
67
  export const agent = new Agent({
68
68
  name: 'personalized-agent',
69
69
  instructions: 'You are a helpful assistant that remembers user preferences',
70
- model: 'openai:gpt-4o',
70
+ model: 'openai/gpt-5.4',
71
71
  inputProcessors: [
72
72
  new WorkingMemory({
73
73
  storage,
@@ -327,7 +327,7 @@ await agent.stream('message for agent', {
327
327
 
328
328
  ## OpenAI WebSocket transport
329
329
 
330
- Opt into OpenAI Responses WebSocket streaming via `providerOptions.openai.transport`. This only applies to streaming calls and is currently supported for direct OpenAI models (for example, `openai/gpt-4o`). If WebSocket streaming is unavailable, Mastra falls back to HTTP streaming. By default, Mastra closes the WebSocket when the stream finishes.
330
+ Opt into OpenAI Responses WebSocket streaming via `providerOptions.openai.transport`. This only applies to streaming calls and is currently supported for direct OpenAI models (for example, `openai/gpt-5.4`). If WebSocket streaming is unavailable, Mastra falls back to HTTP streaming. By default, Mastra closes the WebSocket when the stream finishes.
331
331
 
332
332
  ```ts
333
333
  const stream = await agent.stream('Hello', {
@@ -901,7 +901,7 @@ const mcpClient = new MCPClient({
901
901
  const agent = new Agent({
902
902
  name: 'My Agent',
903
903
  instructions: 'You are a helpful assistant.',
904
- model: openai('gpt-4o'),
904
+ model: openai('gpt-5.4'),
905
905
  tools: await mcpClient.listTools(),
906
906
  })
907
907
 
package/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
1
1
  # @mastra/mcp-docs-server
2
2
 
3
+ ## 1.1.17-alpha.4
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [[`404fea1`](https://github.com/mastra-ai/mastra/commit/404fea13042181f0b0c73a101392ac87c79ceae2), [`ebf5047`](https://github.com/mastra-ai/mastra/commit/ebf5047e825c38a1a356f10b214c1d4260dfcd8d), [`675f15b`](https://github.com/mastra-ai/mastra/commit/675f15b7eaeea649158d228ea635be40480c584d), [`b174c63`](https://github.com/mastra-ai/mastra/commit/b174c63a093108d4e53b9bc89a078d9f66202b3f), [`eef7cb2`](https://github.com/mastra-ai/mastra/commit/eef7cb2abe7ef15951e2fdf792a5095c6c643333), [`86e3263`](https://github.com/mastra-ai/mastra/commit/86e326363edd12be5a5b25ccce4a39f66f7c9f50), [`e8a5b0b`](https://github.com/mastra-ai/mastra/commit/e8a5b0b9bc94d12dee4150095512ca27a288d778)]:
8
+ - @mastra/core@1.17.0-alpha.2
9
+
10
+ ## 1.1.17-alpha.2
11
+
12
+ ### Patch Changes
13
+
14
+ - Updated dependencies [[`7302e5c`](https://github.com/mastra-ai/mastra/commit/7302e5ce0f52d769d3d63fb0faa8a7d4089cda6d)]:
15
+ - @mastra/core@1.16.1-alpha.1
16
+
3
17
  ## 1.1.17-alpha.0
4
18
 
5
19
  ### Patch Changes
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mastra/mcp-docs-server",
3
- "version": "1.1.17-alpha.1",
3
+ "version": "1.1.17-alpha.5",
4
4
  "description": "MCP server for accessing Mastra.ai documentation, changelogs, and news.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -29,7 +29,7 @@
29
29
  "jsdom": "^26.1.0",
30
30
  "local-pkg": "^1.1.2",
31
31
  "zod": "^4.3.6",
32
- "@mastra/core": "1.16.1-alpha.0",
32
+ "@mastra/core": "1.17.0-alpha.2",
33
33
  "@mastra/mcp": "^1.3.1"
34
34
  },
35
35
  "devDependencies": {
@@ -46,9 +46,9 @@
46
46
  "tsx": "^4.21.0",
47
47
  "typescript": "^5.9.3",
48
48
  "vitest": "4.0.18",
49
- "@mastra/core": "1.16.1-alpha.0",
50
49
  "@internal/lint": "0.0.74",
51
- "@internal/types-builder": "0.0.49"
50
+ "@internal/types-builder": "0.0.49",
51
+ "@mastra/core": "1.17.0-alpha.2"
52
52
  },
53
53
  "homepage": "https://mastra.ai",
54
54
  "repository": {