@mastra/mcp-docs-server 1.1.28-alpha.0 → 1.1.28-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -83,9 +83,64 @@ Your processors run first, then memory persists messages.
83
83
 
84
84
  This ordering ensures that if your output guardrail calls `abort()`, memory processors are skipped and no messages are saved. See [Memory Processors](https://mastra.ai/docs/memory/memory-processors) for details.
85
85
 
86
+ ## Attach processors to an agent
87
+
88
+ Processors are configured on the agent through three arrays:
89
+
90
+ ```typescript
91
+ import { Agent } from '@mastra/core/agent'
92
+ import { PrefillErrorHandler, TokenLimiter, ModerationProcessor } from '@mastra/core/processors'
93
+
94
+ const agent = new Agent({
95
+ name: 'support-agent',
96
+ model: 'openai/gpt-5',
97
+ instructions: '...',
98
+ inputProcessors: [
99
+ new TokenLimiter(4000),
100
+ new ModerationProcessor({ model: 'openai/gpt-4.1-nano' }),
101
+ ],
102
+ outputProcessors: [new ModerationProcessor({ model: 'openai/gpt-4.1-nano' })],
103
+ errorProcessors: [new PrefillErrorHandler()],
104
+ })
105
+ ```
106
+
107
+ - `inputProcessors` run before the LLM.
108
+ - `outputProcessors` run during and after the LLM response.
109
+ - `errorProcessors` run when the LLM API call throws, so they can recover from provider errors.
110
+
111
+ Each array also accepts a function that returns an array, so processors can be built per request from `RequestContext`:
112
+
113
+ ```typescript
114
+ new Agent({
115
+ // ...
116
+ inputProcessors: ({ requestContext }) => {
117
+ const limit = requestContext.get('tokenLimit') ?? 4000
118
+ return [new TokenLimiter(limit)]
119
+ },
120
+ })
121
+ ```
122
+
123
+ ### Override processors per call
124
+
125
+ `agent.generate()` and `agent.stream()` accept the same three arrays. When you pass one, it **replaces** the matching array on the agent for that call only. Memory, workspace, and other framework-managed processors still run around your array.
126
+
127
+ ```typescript
128
+ await agent.stream('Summarize this', {
129
+ inputProcessors: [new TokenLimiter(2000)],
130
+ maxProcessorRetries: 5,
131
+ })
132
+ ```
133
+
86
134
  ## Create custom processors
87
135
 
88
- Custom processors implement the `Processor` interface:
136
+ Custom processors implement the `Processor` interface.
137
+
138
+ Processor methods receive two arguments for accessing the conversation:
139
+
140
+ - `messages`: A snapshot array of `MastraDBMessage` objects for the current stage.
141
+ - `messageList`: The live `MessageList` instance. Use it to read other stages, or to add, remove, or replace messages in place.
142
+
143
+ Text lives in `message.content.parts`, not on `message.content` itself. Iterate `parts` and filter by `part.type === 'text'` to read user or assistant text. A flattened `message.content.content` string exists for legacy compatibility and can be used as a fallback. See [Message arguments](https://mastra.ai/reference/processors/processor-interface) in the `Processor` reference for full details.
89
144
 
90
145
  ### Transform input messages
91
146
 
@@ -97,12 +152,15 @@ export class CustomInputProcessor implements Processor {
97
152
  id = 'custom-input'
98
153
 
99
154
  async processInput({ messages }: ProcessInputArgs): Promise<MastraDBMessage[]> {
100
- // Transform messages before they reach the LLM
155
+ // Transform messages before they reach the LLM.
156
+ // Text lives in content.parts — iterate parts and rewrite text parts only.
101
157
  return messages.map(msg => ({
102
158
  ...msg,
103
159
  content: {
104
160
  ...msg.content,
105
- content: msg.content.content.toLowerCase(),
161
+ parts: msg.content.parts?.map(part =>
162
+ part.type === 'text' ? { ...part, text: part.text.toLowerCase() } : part,
163
+ ),
106
164
  },
107
165
  }))
108
166
  }
@@ -214,12 +272,23 @@ export class StreamFilter implements Processor {
214
272
  id = 'stream-filter'
215
273
 
216
274
  async processOutputStream({ part }): Promise<ChunkType | null> {
217
- // Transform or filter streaming chunks
275
+ // Drop text-delta chunks that contain the word "secret"
276
+ if (part.type === 'text-delta' && part.payload.text.includes('secret')) {
277
+ return null
278
+ }
279
+
280
+ // Return the (possibly modified) chunk to emit it
218
281
  return part
219
282
  }
220
283
  }
221
284
  ```
222
285
 
286
+ Return values:
287
+
288
+ - A `ChunkType` emits that chunk. Return the original `part` to pass it through unchanged.
289
+ - `null` or `undefined` drops the chunk. Both behave the same way, so a method that falls through without returning also drops the chunk.
290
+ - Dropping only affects one chunk. To stop the stream entirely, call `abort()`.
291
+
223
292
  To also receive custom `data-*` chunks emitted by tools via `writer.custom()`, set `processDataParts = true` on your processor. This lets you inspect, modify, or block tool-emitted data chunks before they reach the client.
224
293
 
225
294
  ### Validate each response
@@ -246,6 +315,31 @@ export class ResponseValidator implements Processor {
246
315
 
247
316
  For more on retry behavior, see [Retry mechanism](#retry-mechanism) in Advanced patterns.
248
317
 
318
+ ### Persist data across chunks and steps
319
+
320
+ Output methods receive a `state` object that persists for the lifetime of one request. State is keyed by the processor's `id`, so each processor sees only its own data, and it is shared between `processOutputStream`, `processOutputStep`, and `processOutputResult`. A new state object is created for every new `agent.generate()` or `agent.stream()` call.
321
+
322
+ ```typescript
323
+ import type { Processor } from '@mastra/core/processors'
324
+
325
+ export class WordCounter implements Processor {
326
+ id = 'word-counter'
327
+
328
+ async processOutputStream({ part, state }) {
329
+ state.wordCount ??= 0
330
+ if (part.type === 'text-delta') {
331
+ state.wordCount += part.payload.text.split(/\s+/).filter(Boolean).length
332
+ }
333
+ return part
334
+ }
335
+
336
+ async processOutputResult({ messages, state }) {
337
+ console.log(`Total words: ${state.wordCount}`)
338
+ return messages
339
+ }
340
+ }
341
+ ```
342
+
249
343
  ## Built-in utility processors
250
344
 
251
345
  Mastra provides utility processors for common tasks:
@@ -392,6 +486,23 @@ for await (const chunk of stream.fullStream) {
392
486
 
393
487
  Custom chunk types must use the `data-` prefix (e.g., `data-moderation-update`, `data-status`).
394
488
 
489
+ By default, `processOutputStream()` skips `data-*` chunks so it does not accidentally operate on tool telemetry or other processors' output. To inspect, modify, or block these chunks in a processor, set `processDataParts = true` on that processor:
490
+
491
+ ```typescript
492
+ class ModerationCollector implements Processor {
493
+ id = 'moderation-collector'
494
+ processDataParts = true
495
+
496
+ async processOutputStream({ part, state }) {
497
+ if (part.type === 'data-moderation-update') {
498
+ state.warnings ??= []
499
+ state.warnings.push(part.data)
500
+ }
501
+ return part
502
+ }
503
+ }
504
+ ```
505
+
395
506
  ### Add metadata to messages
396
507
 
397
508
  You can add custom metadata to messages in `processOutputResult`. This metadata is accessible via the response object:
@@ -527,7 +638,7 @@ const agent = new Agent({
527
638
  name: 'Quality Agent',
528
639
  model: 'openai/gpt-5.4',
529
640
  outputProcessors: [new QualityChecker()],
530
- maxProcessorRetries: 3, // Maximum retry attempts (default: 3)
641
+ maxProcessorRetries: 3, // Maximum retry attempts. If unset, retries are disabled (unless errorProcessors are configured, in which case it defaults to 10).
531
642
  })
532
643
  ```
533
644
 
@@ -538,6 +649,26 @@ The retry mechanism:
538
649
  - Tracks retry count via the `retryCount` parameter
539
650
  - Respects `maxProcessorRetries` limit on the agent
540
651
 
652
+ ### Abort and tripwire chunks
653
+
654
+ Calling `abort(reason, options)` throws a `TripWire` error that ends processing. On streams, Mastra emits a `tripwire` chunk clients can detect:
655
+
656
+ ```typescript
657
+ for await (const chunk of stream.fullStream) {
658
+ if (chunk.type === 'tripwire') {
659
+ console.log('Blocked by', chunk.payload.processorId, '-', chunk.payload.reason)
660
+ break
661
+ }
662
+ }
663
+ ```
664
+
665
+ For `agent.generate()`, the result exposes the same information via `result.tripwire`, with `result.finishReason === 'other'`.
666
+
667
+ `abort` accepts a second options argument:
668
+
669
+ - `retry: true` asks the agent to retry instead of ending. Retries require `maxProcessorRetries` to be set on the agent or call.
670
+ - `metadata` attaches structured data to the `tripwire` chunk so downstream consumers can branch on categories like `pii`, `quality`, or `moderation`.
671
+
541
672
  ## API error handling
542
673
 
543
674
  The `processAPIError` method handles LLM API rejections — errors where the API rejects the request (such as 400 or 422 status codes) rather than network or server failures. This lets you modify the request and retry when the API rejects the message format.
@@ -0,0 +1,144 @@
1
+ # BrowserViewer
2
+
3
+ The `@mastra/browser-viewer` package provides browser automation for CLI-based tools like [agent-browser](https://www.npmjs.com/package/agent-browser), [browser-use](https://pypi.org/project/browser-use/), and [browse](https://www.npmjs.com/package/@browserbasehq/browse-cli). BrowserViewer launches Chrome via Playwright, exposes a Chrome DevTools Protocol (CDP) URL, and automatically injects it into CLI commands run through [workspace tools](https://mastra.ai/docs/workspace/overview).
4
+
5
+ ## When to use BrowserViewer
6
+
7
+ Use BrowserViewer when your agent drives a browser through a CLI tool rather than an SDK. BrowserViewer handles:
8
+
9
+ - Launching and managing Chrome for CLI tools to connect to
10
+ - Automatic CDP URL injection into `workspace_execute_command` calls
11
+ - Live screencast streaming to Studio
12
+ - Thread-scoped browser isolation
13
+
14
+ For SDK-based browser automation, use [AgentBrowser](https://mastra.ai/docs/browser/agent-browser) or [Stagehand](https://mastra.ai/docs/browser/stagehand) instead.
15
+
16
+ ## Quickstart
17
+
18
+ Install `@mastra/browser-viewer` and the CLI tool your agent will use. BrowserViewer manages Chrome, but the CLI tool itself must be installed separately.
19
+
20
+ **npm**:
21
+
22
+ ```bash
23
+ npm install @mastra/browser-viewer
24
+ ```
25
+
26
+ **pnpm**:
27
+
28
+ ```bash
29
+ pnpm add @mastra/browser-viewer
30
+ ```
31
+
32
+ **Yarn**:
33
+
34
+ ```bash
35
+ yarn add @mastra/browser-viewer
36
+ ```
37
+
38
+ **Bun**:
39
+
40
+ ```bash
41
+ bun add @mastra/browser-viewer
42
+ ```
43
+
44
+ Install the CLI tool in your workspace environment. This example uses `browser-use`:
45
+
46
+ ```bash
47
+ pip install browser-use
48
+ ```
49
+
50
+ Install the corresponding [skill](https://mastra.ai/docs/workspace/skills) so the agent knows how to use the CLI:
51
+
52
+ ```bash
53
+ npx skills add browser-use/browser-use --skill browser-use
54
+ ```
55
+
56
+ Create a workspace with BrowserViewer and assign it to an agent:
57
+
58
+ ```typescript
59
+ import { Mastra } from '@mastra/core/mastra'
60
+ import { Agent } from '@mastra/core/agent'
61
+ import { Workspace, LocalSandbox } from '@mastra/core/workspace'
62
+ import { BrowserViewer } from '@mastra/browser-viewer'
63
+
64
+ const workspace = new Workspace({
65
+ sandbox: new LocalSandbox({
66
+ workingDirectory: './workspace',
67
+ }),
68
+ browser: new BrowserViewer({
69
+ cli: 'browser-use',
70
+ headless: false,
71
+ }),
72
+ })
73
+
74
+ const browserAgent = new Agent({
75
+ id: 'browser-agent',
76
+ model: 'openai/gpt-5.4',
77
+ workspace,
78
+ instructions: `You are a web automation assistant.
79
+ Use browser-use commands to navigate and interact with websites.`,
80
+ })
81
+
82
+ export const mastra = new Mastra({
83
+ agents: { browserAgent },
84
+ })
85
+ ```
86
+
87
+ When the agent runs a CLI command like `browser-use open https://example.com`, Mastra automatically launches Chrome, injects the CDP connection, and starts streaming the screencast to Studio.
88
+
89
+ ## How it works
90
+
91
+ 1. The agent calls `workspace_execute_command` with a browser CLI command.
92
+ 2. Mastra detects the CLI command and launches Chrome via Playwright (if not already running).
93
+ 3. The CDP URL is injected into the command so the CLI connects to the managed browser.
94
+ 4. Screencast frames stream from page-level CDP sessions to Studio.
95
+
96
+ ## Supported CLIs
97
+
98
+ BrowserViewer supports three CLI providers. Each CLI must be installed separately in your workspace environment. Each CLI also publishes a [skill](https://mastra.ai/docs/workspace/skills) that teaches the agent its commands and workflows.
99
+
100
+ ### agent-browser
101
+
102
+ ```bash
103
+ npm install -g agent-browser
104
+ npx skills add vercel-labs/agent-browser
105
+ ```
106
+
107
+ CDP flag: `--cdp`
108
+
109
+ ### browser-use
110
+
111
+ ```bash
112
+ pip install browser-use
113
+ npx skills add browser-use/browser-use --skill browser-use
114
+ ```
115
+
116
+ CDP flag: `--cdp-url`
117
+
118
+ ### browse-cli
119
+
120
+ ```bash
121
+ npm install -g @browserbasehq/browse-cli
122
+ npx skills add browserbase/skills
123
+ ```
124
+
125
+ CDP flag: `--ws`
126
+
127
+ Set the `cli` option to match the CLI your agent uses:
128
+
129
+ ```typescript
130
+ const viewer = new BrowserViewer({
131
+ cli: 'browser-use',
132
+ headless: false,
133
+ })
134
+ ```
135
+
136
+ > **Note:** See [BrowserViewer reference](https://mastra.ai/reference/browser/browser-viewer) for all configuration options, advanced connection modes, and method details.
137
+
138
+ ## Related
139
+
140
+ - [Browser overview](https://mastra.ai/docs/browser/overview)
141
+ - [AgentBrowser](https://mastra.ai/docs/browser/agent-browser)
142
+ - [Stagehand](https://mastra.ai/docs/browser/stagehand)
143
+ - [Workspace overview](https://mastra.ai/docs/workspace/overview)
144
+ - [Workspace skills](https://mastra.ai/docs/workspace/skills)
@@ -1,11 +1,12 @@
1
1
  # Browser overview
2
2
 
3
- Browser support enables agents to navigate websites, interact with page elements, fill forms, and extract data. Mastra provides browser capabilities through SDK providers that wrap popular browser automation libraries.
3
+ Browser support enables agents to navigate websites, interact with page elements, fill forms, and extract data. Mastra provides browser capabilities through SDK providers that wrap browser automation libraries and a CLI provider for agents that drive browsers through command-line tools.
4
4
 
5
- Mastra supports two browser SDK providers:
5
+ Mastra supports two SDK providers and one CLI provider:
6
6
 
7
7
  - [**AgentBrowser**](https://mastra.ai/docs/browser/agent-browser): A Playwright-based provider with accessibility-first element targeting. Best for general web automation and scraping.
8
8
  - [**Stagehand**](https://mastra.ai/docs/browser/stagehand): A Browserbase provider with AI-powered element detection. Best for complex interactions that benefit from natural language selectors.
9
+ - [**BrowserViewer**](https://mastra.ai/docs/browser/browser-viewer): A CLI provider that launches Chrome and injects CDP URLs into CLI tools like agent-browser, browser-use, and browse. Best for workspace agents that drive browsers through shell commands.
9
10
 
10
11
  ## When to use browser
11
12
 
@@ -115,6 +116,34 @@ This works with any [CDP-compatible](https://chromedevtools.github.io/devtools-p
115
116
 
116
117
  Browser providers stream a live video feed of the browser to the Mastra Studio UI. This lets you watch the agent interact with pages in real-time.
117
118
 
119
+ Screencast requires WebSocket support. Install these packages in your project:
120
+
121
+ **npm**:
122
+
123
+ ```bash
124
+ npm install ws @hono/node-ws
125
+ ```
126
+
127
+ **pnpm**:
128
+
129
+ ```bash
130
+ pnpm add ws @hono/node-ws
131
+ ```
132
+
133
+ **Yarn**:
134
+
135
+ ```bash
136
+ yarn add ws @hono/node-ws
137
+ ```
138
+
139
+ **Bun**:
140
+
141
+ ```bash
142
+ bun add ws @hono/node-ws
143
+ ```
144
+
145
+ > **Note:** These packages are not included by default because they are incompatible with serverless environments like Cloudflare Workers. If they are not installed, screencast is disabled but all other browser functionality works normally.
146
+
118
147
  Screencast is enabled by default and can be configured:
119
148
 
120
149
  ```typescript
@@ -133,4 +162,5 @@ const browser = new AgentBrowser({
133
162
 
134
163
  - [AgentBrowser](https://mastra.ai/docs/browser/agent-browser)
135
164
  - [Stagehand](https://mastra.ai/docs/browser/stagehand)
165
+ - [BrowserViewer](https://mastra.ai/docs/browser/browser-viewer)
136
166
  - [MastraBrowser reference](https://mastra.ai/reference/browser/mastra-browser)
@@ -8,10 +8,11 @@ LSP inspection gives workspace-backed agents semantic code intelligence. When yo
8
8
 
9
9
  Use LSP inspection when your agent needs semantic code understanding instead of plain-text search alone:
10
10
 
11
- - Inspect TypeScript or JavaScript symbols and their inferred types
11
+ - Inspect symbols and their inferred types in any supported language
12
12
  - Find where a symbol is declared before editing related code
13
13
  - Explore implementations across a codebase without manually tracing every file
14
14
  - Combine semantic inspection with `view` and `search_content` for faster navigation
15
+ - Add LSP support for additional languages by [registering custom language servers](#custom-language-servers)
15
16
 
16
17
  ## Basic usage
17
18
 
@@ -84,9 +85,9 @@ const workspace = new Workspace({
84
85
  lsp: {
85
86
  diagnosticTimeout: 4000,
86
87
  initTimeout: 8000,
87
- disableServers: ['pyright'],
88
+ disableServers: ['eslint'],
88
89
  binaryOverrides: {
89
- typescript: '/custom/path/to/typescript-language-server',
90
+ typescript: '/custom/path/to/typescript-language-server --stdio',
90
91
  },
91
92
  searchPaths: ['/opt/homebrew/bin'],
92
93
  },
@@ -100,9 +101,80 @@ Use custom configuration when you need to:
100
101
  - Point Mastra at custom language server binaries
101
102
  - Add extra binary search paths in constrained environments
102
103
 
104
+ ## Custom language servers
105
+
106
+ By default, Mastra includes built-in support for TypeScript, JavaScript, Python, Go, and Rust. To use LSP inspection with other languages (e.g. PHP, Ruby, Java, Kotlin, Swift, Elixir), register a custom language server via the `servers` field:
107
+
108
+ ```typescript
109
+ import { Workspace, LocalFilesystem, LocalSandbox } from '@mastra/core/workspace'
110
+
111
+ const workspace = new Workspace({
112
+ filesystem: new LocalFilesystem({ basePath: './workspace' }),
113
+ sandbox: new LocalSandbox({ workingDirectory: './workspace' }),
114
+ lsp: {
115
+ servers: {
116
+ phpactor: {
117
+ id: 'phpactor',
118
+ name: 'Phpactor Language Server',
119
+ languageIds: ['php'],
120
+ extensions: ['.php'],
121
+ markers: ['composer.json'],
122
+ command: 'phpactor language-server',
123
+ },
124
+ },
125
+ },
126
+ })
127
+ ```
128
+
129
+ Each custom server definition requires these fields:
130
+
131
+ | Field | Description |
132
+ | ------------- | ------------------------------------------------------------------------------------- |
133
+ | `id` | Unique identifier for the server |
134
+ | `name` | Human-readable name shown in logs |
135
+ | `languageIds` | Language Server Protocol (LSP) language identifiers this server handles |
136
+ | `extensions` | File extensions, including the dot |
137
+ | `markers` | Files or directories that identify the project root (e.g. `composer.json`, `Gemfile`) |
138
+ | `command` | Full command string to start the server |
139
+
140
+ When a server has multiple language IDs, Mastra maps each extension to the first entry in `languageIds`.
141
+
142
+ You can also pass optional `initializationOptions` to send custom settings during the LSP handshake.
143
+
144
+ Custom servers are merged with built-in servers. To replace a built-in server, use the same `id` (e.g. `id: 'go'` replaces the built-in Go server). Register multiple servers to support several languages at once:
145
+
146
+ ```typescript
147
+ import { Workspace, LocalFilesystem, LocalSandbox } from '@mastra/core/workspace'
148
+
149
+ const workspace = new Workspace({
150
+ filesystem: new LocalFilesystem({ basePath: './workspace' }),
151
+ sandbox: new LocalSandbox({ workingDirectory: './workspace' }),
152
+ lsp: {
153
+ servers: {
154
+ phpactor: {
155
+ id: 'phpactor',
156
+ name: 'Phpactor Language Server',
157
+ languageIds: ['php'],
158
+ extensions: ['.php'],
159
+ markers: ['composer.json'],
160
+ command: 'phpactor language-server',
161
+ },
162
+ solargraph: {
163
+ id: 'solargraph',
164
+ name: 'Solargraph',
165
+ languageIds: ['ruby'],
166
+ extensions: ['.rb', '.erb'],
167
+ markers: ['Gemfile'],
168
+ command: 'solargraph stdio',
169
+ },
170
+ },
171
+ },
172
+ })
173
+ ```
174
+
103
175
  ## Requirements and limitations
104
176
 
105
- - LSP inspection only works for file types with a matching language server
177
+ - LSP inspection only works for file types with a matching built-in or custom language server
106
178
  - The `path` you inspect must resolve inside the workspace filesystem or allowed paths
107
179
  - External package inspection may resolve to declaration files such as `.d.ts` instead of runtime source files
108
180
  - `lsp_inspect` complements `view` and `search_content`, but does not replace reading implementation code when you need full context
@@ -19,6 +19,7 @@ A sandbox provider executes commands in a controlled environment:
19
19
  - [`BlaxelSandbox`](https://mastra.ai/reference/workspace/blaxel-sandbox): Executes commands in isolated Blaxel cloud sandboxes
20
20
  - [`DaytonaSandbox`](https://mastra.ai/reference/workspace/daytona-sandbox): Executes commands in isolated Daytona cloud sandboxes
21
21
  - [`E2BSandbox`](https://mastra.ai/reference/workspace/e2b-sandbox): Executes commands in isolated E2B cloud sandboxes
22
+ - [`ModalSandbox`](https://mastra.ai/reference/workspace/modal-sandbox): Executes commands in isolated Modal cloud sandboxes
22
23
 
23
24
  ## Basic usage
24
25
 
@@ -110,7 +111,7 @@ const workspace = new Workspace({
110
111
  })
111
112
  ```
112
113
 
113
- Use `null` or `false` for cloud sandboxes (for example, E2B or Daytona) where processes should outlive the agent.
114
+ Use `null` or `false` for cloud sandboxes (for example, E2B, Daytona, or Modal) where processes should outlive the agent.
114
115
 
115
116
  > **Note:** For the full `SandboxProcessManager` API (spawning processes programmatically, reading output, sending stdin), see the [`SandboxProcessManager` reference](https://mastra.ai/reference/workspace/process-manager).
116
117
 
@@ -118,6 +119,7 @@ Use `null` or `false` for cloud sandboxes (for example, E2B or Daytona) where pr
118
119
 
119
120
  - [`SandboxProcessManager` reference](https://mastra.ai/reference/workspace/process-manager)
120
121
  - [`LocalSandbox` reference](https://mastra.ai/reference/workspace/local-sandbox)
122
+ - [`ModalSandbox` reference](https://mastra.ai/reference/workspace/modal-sandbox)
121
123
  - [`DaytonaSandbox` reference](https://mastra.ai/reference/workspace/daytona-sandbox)
122
124
  - [`E2BSandbox` reference](https://mastra.ai/reference/workspace/e2b-sandbox)
123
125
  - [Workspace overview](https://mastra.ai/docs/workspace/overview)
@@ -1,6 +1,6 @@
1
1
  # Model Providers
2
2
 
3
- Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3707 models from 104 providers through a single API.
3
+ Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3723 models from 104 providers through a single API.
4
4
 
5
5
  ## Features
6
6
 
@@ -1,6 +1,6 @@
1
1
  # ![Chutes logo](https://models.dev/logos/chutes.svg)Chutes
2
2
 
3
- Access 69 Chutes models through Mastra's model router. Authentication is handled automatically using the `CHUTES_API_KEY` environment variable.
3
+ Access 70 Chutes models through Mastra's model router. Authentication is handled automatically using the `CHUTES_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Chutes documentation](https://llm.chutes.ai).
6
6
 
@@ -52,6 +52,7 @@ for await (const chunk of stream) {
52
52
  | `chutes/moonshotai/Kimi-K2-Instruct-0905` | 262K | | | | | | $0.39 | $2 |
53
53
  | `chutes/moonshotai/Kimi-K2-Thinking-TEE` | 262K | | | | | | $0.40 | $2 |
54
54
  | `chutes/moonshotai/Kimi-K2.5-TEE` | 262K | | | | | | $0.60 | $3 |
55
+ | `chutes/moonshotai/Kimi-K2.6-TEE` | 262K | | | | | | $0.44 | $2 |
55
56
  | `chutes/NousResearch/DeepHermes-3-Mistral-24B-Preview` | 33K | | | | | | $0.02 | $0.10 |
56
57
  | `chutes/NousResearch/Hermes-4-14B` | 41K | | | | | | $0.01 | $0.05 |
57
58
  | `chutes/NousResearch/Hermes-4-405B-FP8-TEE` | 131K | | | | | | $0.30 | $1 |
@@ -1,6 +1,6 @@
1
1
  # ![Cortecs logo](https://models.dev/logos/cortecs.svg)Cortecs
2
2
 
3
- Access 33 Cortecs models through Mastra's model router. Authentication is handled automatically using the `CORTECS_API_KEY` environment variable.
3
+ Access 34 Cortecs models through Mastra's model router. Authentication is handled automatically using the `CORTECS_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Cortecs documentation](https://cortecs.ai).
6
6
 
@@ -57,6 +57,7 @@ for await (const chunk of stream) {
57
57
  | `cortecs/kimi-k2-instruct` | 131K | | | | | | $0.55 | $3 |
58
58
  | `cortecs/kimi-k2-thinking` | 262K | | | | | | $0.66 | $3 |
59
59
  | `cortecs/kimi-k2.5` | 256K | | | | | | $0.55 | $3 |
60
+ | `cortecs/kimi-k2.6` | 256K | | | | | | $0.81 | $4 |
60
61
  | `cortecs/llama-3.1-405b-instruct` | 128K | | | | | | — | — |
61
62
  | `cortecs/minimax-m2` | 400K | | | | | | $0.39 | $2 |
62
63
  | `cortecs/minimax-m2.1` | 196K | | | | | | $0.34 | $1 |
@@ -1,6 +1,6 @@
1
1
  # ![Deep Infra logo](https://models.dev/logos/deepinfra.svg)Deep Infra
2
2
 
3
- Access 31 Deep Infra models through Mastra's model router. Authentication is handled automatically using the `DEEPINFRA_API_KEY` environment variable.
3
+ Access 32 Deep Infra models through Mastra's model router. Authentication is handled automatically using the `DEEPINFRA_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Deep Infra documentation](https://deepinfra.com/models).
6
6
 
@@ -50,6 +50,7 @@ for await (const chunk of stream) {
50
50
  | `deepinfra/moonshotai/Kimi-K2-Instruct-0905` | 262K | | | | | | $0.40 | $2 |
51
51
  | `deepinfra/moonshotai/Kimi-K2-Thinking` | 131K | | | | | | $0.47 | $2 |
52
52
  | `deepinfra/moonshotai/Kimi-K2.5` | 262K | | | | | | $0.50 | $3 |
53
+ | `deepinfra/moonshotai/Kimi-K2.6` | 262K | | | | | | $0.75 | $4 |
53
54
  | `deepinfra/openai/gpt-oss-120b` | 131K | | | | | | $0.05 | $0.24 |
54
55
  | `deepinfra/openai/gpt-oss-20b` | 131K | | | | | | $0.03 | $0.14 |
55
56
  | `deepinfra/Qwen/Qwen3-Coder-480B-A35B-Instruct` | 262K | | | | | | $0.40 | $2 |
@@ -67,7 +67,7 @@ for await (const chunk of stream) {
67
67
  | `google/gemma-3-4b-it` | 33K | | | | | | — | — |
68
68
  | `google/gemma-3n-e2b-it` | 8K | | | | | | — | — |
69
69
  | `google/gemma-3n-e4b-it` | 8K | | | | | | — | — |
70
- | `google/gemma-4-26b-it` | 256K | | | | | | — | — |
70
+ | `google/gemma-4-26b-a4b-it` | 256K | | | | | | — | — |
71
71
  | `google/gemma-4-31b-it` | 256K | | | | | | — | — |
72
72
 
73
73
  ## Advanced configuration
@@ -1,6 +1,6 @@
1
1
  # ![NovitaAI logo](https://models.dev/logos/novita-ai.svg)NovitaAI
2
2
 
3
- Access 90 NovitaAI models through Mastra's model router. Authentication is handled automatically using the `NOVITA_API_KEY` environment variable.
3
+ Access 96 NovitaAI models through Mastra's model router. Authentication is handled automatically using the `NOVITA_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [NovitaAI documentation](https://novita.ai/docs/guides/introduction).
6
6
 
@@ -47,6 +47,8 @@ for await (const chunk of stream) {
47
47
  | `novita-ai/deepseek/deepseek-r1-0528` | 164K | | | | | | $0.70 | $3 |
48
48
  | `novita-ai/deepseek/deepseek-r1-0528-qwen3-8b` | 128K | | | | | | $0.06 | $0.09 |
49
49
  | `novita-ai/deepseek/deepseek-r1-distill-llama-70b` | 8K | | | | | | $0.80 | $0.80 |
50
+ | `novita-ai/deepseek/deepseek-r1-distill-qwen-14b` | 33K | | | | | | $0.15 | $0.15 |
51
+ | `novita-ai/deepseek/deepseek-r1-distill-qwen-32b` | 64K | | | | | | $0.30 | $0.30 |
50
52
  | `novita-ai/deepseek/deepseek-r1-turbo` | 64K | | | | | | $0.70 | $3 |
51
53
  | `novita-ai/deepseek/deepseek-v3-0324` | 164K | | | | | | $0.27 | $1 |
52
54
  | `novita-ai/deepseek/deepseek-v3-turbo` | 64K | | | | | | $0.40 | $1 |
@@ -54,14 +56,17 @@ for await (const chunk of stream) {
54
56
  | `novita-ai/deepseek/deepseek-v3.1-terminus` | 131K | | | | | | $0.27 | $1 |
55
57
  | `novita-ai/deepseek/deepseek-v3.2` | 164K | | | | | | $0.27 | $0.40 |
56
58
  | `novita-ai/deepseek/deepseek-v3.2-exp` | 164K | | | | | | $0.27 | $0.41 |
59
+ | `novita-ai/google/gemma-3-12b-it` | 131K | | | | | | $0.05 | $0.10 |
57
60
  | `novita-ai/google/gemma-3-27b-it` | 98K | | | | | | $0.12 | $0.20 |
58
61
  | `novita-ai/google/gemma-4-26b-a4b-it` | 262K | | | | | | $0.13 | $0.40 |
59
62
  | `novita-ai/google/gemma-4-31b-it` | 262K | | | | | | $0.14 | $0.40 |
60
63
  | `novita-ai/gryphe/mythomax-l2-13b` | 4K | | | | | | $0.09 | $0.09 |
64
+ | `novita-ai/inclusionai/ling-2.6-1t` | 262K | | | | | | — | — |
61
65
  | `novita-ai/kwaipilot/kat-coder-pro` | 256K | | | | | | $0.30 | $1 |
62
66
  | `novita-ai/meta-llama/llama-3-70b-instruct` | 8K | | | | | | $0.51 | $0.74 |
63
67
  | `novita-ai/meta-llama/llama-3-8b-instruct` | 8K | | | | | | $0.04 | $0.04 |
64
68
  | `novita-ai/meta-llama/llama-3.1-8b-instruct` | 16K | | | | | | $0.02 | $0.05 |
69
+ | `novita-ai/meta-llama/llama-3.2-3b-instruct` | 33K | | | | | | $0.03 | $0.05 |
65
70
  | `novita-ai/meta-llama/llama-3.3-70b-instruct` | 131K | | | | | | $0.14 | $0.40 |
66
71
  | `novita-ai/meta-llama/llama-4-maverick-17b-128e-instruct-fp8` | 1.0M | | | | | | $0.27 | $0.85 |
67
72
  | `novita-ai/meta-llama/llama-4-scout-17b-16e-instruct` | 131K | | | | | | $0.18 | $0.59 |
@@ -77,6 +82,7 @@ for await (const chunk of stream) {
77
82
  | `novita-ai/moonshotai/kimi-k2-instruct` | 131K | | | | | | $0.57 | $2 |
78
83
  | `novita-ai/moonshotai/kimi-k2-thinking` | 262K | | | | | | $0.60 | $3 |
79
84
  | `novita-ai/moonshotai/kimi-k2.5` | 262K | | | | | | $0.60 | $3 |
85
+ | `novita-ai/moonshotai/kimi-k2.6` | 262K | | | | | | $0.95 | $4 |
80
86
  | `novita-ai/nousresearch/hermes-2-pro-llama-3-8b` | 8K | | | | | | $0.14 | $0.14 |
81
87
  | `novita-ai/openai/gpt-oss-120b` | 131K | | | | | | $0.05 | $0.25 |
82
88
  | `novita-ai/openai/gpt-oss-20b` | 131K | | | | | | $0.04 | $0.15 |
@@ -1,6 +1,6 @@
1
1
  # ![OpenAI logo](https://models.dev/logos/openai.svg)OpenAI
2
2
 
3
- Access 50 OpenAI models through Mastra's model router. Authentication is handled automatically using the `OPENAI_API_KEY` environment variable.
3
+ Access 51 OpenAI models through Mastra's model router. Authentication is handled automatically using the `OPENAI_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [OpenAI documentation](https://platform.openai.com/docs/models).
6
6
 
@@ -66,6 +66,7 @@ for await (const chunk of stream) {
66
66
  | `openai/gpt-5.4-mini` | 400K | | | | | | $0.75 | $5 |
67
67
  | `openai/gpt-5.4-nano` | 400K | | | | | | $0.20 | $1 |
68
68
  | `openai/gpt-5.4-pro` | 1.1M | | | | | | $30 | $180 |
69
+ | `openai/gpt-5.5` | 1.1M | | | | | | $5 | $30 |
69
70
  | `openai/gpt-image-1` | — | | | | | | — | — |
70
71
  | `openai/gpt-image-1-mini` | — | | | | | | — | — |
71
72
  | `openai/gpt-image-1.5` | — | | | | | | — | — |