@mastra/mcp-docs-server 1.1.28 → 1.1.29-alpha.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/docs/agents/background-tasks.md +242 -0
- package/.docs/docs/agents/channels.md +2 -1
- package/.docs/docs/agents/supervisor-agents.md +35 -4
- package/.docs/docs/agents/using-tools.md +1 -0
- package/.docs/docs/browser/overview.md +1 -0
- package/.docs/docs/evals/custom-scorers.md +60 -0
- package/.docs/docs/streaming/background-task-streaming.md +80 -0
- package/.docs/docs/streaming/overview.md +3 -0
- package/.docs/docs/workspace/filesystem.md +3 -1
- package/.docs/docs/workspace/overview.md +1 -1
- package/.docs/docs/workspace/search.md +2 -2
- package/.docs/docs/workspace/skills.md +16 -16
- package/.docs/guides/build-your-ui/ai-sdk-ui.md +5 -3
- package/.docs/guides/guide/code-review-bot.md +2 -2
- package/.docs/guides/guide/dev-assistant.md +4 -4
- package/.docs/guides/guide/slack-assistant.md +191 -0
- package/.docs/models/gateways/azure-openai.md +25 -25
- package/.docs/models/gateways/mastra.md +64 -0
- package/.docs/models/gateways/netlify.md +5 -1
- package/.docs/models/gateways/openrouter.md +8 -1
- package/.docs/models/gateways/vercel.md +7 -1
- package/.docs/models/gateways.md +1 -0
- package/.docs/models/index.md +4 -4
- package/.docs/models/providers/abliteration-ai.md +71 -0
- package/.docs/models/providers/alibaba-cn.md +4 -1
- package/.docs/models/providers/alibaba.md +6 -1
- package/.docs/models/providers/deepseek.md +6 -4
- package/.docs/models/providers/huggingface.md +2 -1
- package/.docs/models/providers/llmgateway.md +4 -1
- package/.docs/models/providers/novita-ai.md +4 -1
- package/.docs/models/providers/nvidia.md +3 -1
- package/.docs/models/providers/ollama-cloud.md +3 -1
- package/.docs/models/providers/opencode-go.md +20 -18
- package/.docs/models/providers/opencode.md +3 -2
- package/.docs/models/providers/poe.md +3 -1
- package/.docs/models/providers/togetherai.md +2 -1
- package/.docs/models/providers/xiaomi-token-plan-ams.md +9 -7
- package/.docs/models/providers/xiaomi-token-plan-cn.md +9 -7
- package/.docs/models/providers/xiaomi-token-plan-sgp.md +9 -7
- package/.docs/models/providers/xiaomi.md +4 -2
- package/.docs/models/providers/zai-coding-plan.md +11 -20
- package/.docs/models/providers/zhipuai-coding-plan.md +11 -21
- package/.docs/models/providers.md +1 -0
- package/.docs/reference/client-js/agents.md +44 -0
- package/.docs/reference/configuration.md +63 -0
- package/.docs/reference/evals/create-scorer.md +2 -0
- package/.docs/reference/evals/filter-run.md +117 -0
- package/.docs/reference/index.md +3 -0
- package/.docs/reference/memory/clone-utilities.md +4 -2
- package/.docs/reference/memory/cloneThread.md +4 -2
- package/.docs/reference/processors/skill-search-processor.md +1 -1
- package/.docs/reference/server/routes.md +9 -8
- package/.docs/reference/streaming/ChunkType.md +140 -0
- package/.docs/reference/streaming/agents/streamUntilIdle.md +94 -0
- package/.docs/reference/workspace/azure-blob-filesystem.md +219 -0
- package/.docs/reference/workspace/gcs-filesystem.md +1 -0
- package/.docs/reference/workspace/s3-filesystem.md +1 -0
- package/.docs/reference/workspace/workspace-class.md +1 -1
- package/CHANGELOG.md +42 -0
- package/package.json +4 -4
|
@@ -255,8 +255,10 @@ const { messages, sendMessage } = useChat({
|
|
|
255
255
|
return {
|
|
256
256
|
body: {
|
|
257
257
|
messages: [messages[messages.length - 1]],
|
|
258
|
-
|
|
259
|
-
|
|
258
|
+
memory: {
|
|
259
|
+
thread: 'user-thread-123',
|
|
260
|
+
resource: 'user-123',
|
|
261
|
+
},
|
|
260
262
|
},
|
|
261
263
|
}
|
|
262
264
|
},
|
|
@@ -264,7 +266,7 @@ const { messages, sendMessage } = useChat({
|
|
|
264
266
|
})
|
|
265
267
|
```
|
|
266
268
|
|
|
267
|
-
Set `
|
|
269
|
+
Set `memory.thread` and `memory.resource` from your app's own state, such as URL params, auth context, or your database.
|
|
268
270
|
|
|
269
271
|
See [Message history](https://mastra.ai/docs/memory/message-history) for more on how Mastra memory loads and stores messages.
|
|
270
272
|
|
|
@@ -21,7 +21,7 @@ const workspace = new Workspace({
|
|
|
21
21
|
filesystem: new LocalFilesystem({
|
|
22
22
|
basePath: resolve(import.meta.dirname, '../../workspace'),
|
|
23
23
|
}),
|
|
24
|
-
skills: ['
|
|
24
|
+
skills: ['skills'],
|
|
25
25
|
})
|
|
26
26
|
|
|
27
27
|
export const mastra = new Mastra({
|
|
@@ -138,7 +138,7 @@ const workspace = new Workspace({
|
|
|
138
138
|
filesystem: new LocalFilesystem({
|
|
139
139
|
basePath: resolve(import.meta.dirname, '../../workspace'),
|
|
140
140
|
}),
|
|
141
|
-
skills: ['
|
|
141
|
+
skills: ['skills'],
|
|
142
142
|
})
|
|
143
143
|
|
|
144
144
|
export const mastra = new Mastra({
|
|
@@ -57,9 +57,9 @@ import { Workspace, LocalFilesystem, LocalSandbox } from '@mastra/core/workspace
|
|
|
57
57
|
const workspace = new Workspace({
|
|
58
58
|
filesystem: new LocalFilesystem({ basePath: resolve(import.meta.dirname, '../../workspace') }),
|
|
59
59
|
sandbox: new LocalSandbox({ workingDirectory: resolve(import.meta.dirname, '../../workspace') }),
|
|
60
|
-
skills: ['
|
|
60
|
+
skills: ['skills'],
|
|
61
61
|
bm25: true,
|
|
62
|
-
autoIndexPaths: ['
|
|
62
|
+
autoIndexPaths: ['docs', 'src'],
|
|
63
63
|
})
|
|
64
64
|
|
|
65
65
|
export const mastra = new Mastra({
|
|
@@ -210,9 +210,9 @@ import { devAssistant } from './agents/dev-assistant'
|
|
|
210
210
|
const workspace = new Workspace({
|
|
211
211
|
filesystem: new LocalFilesystem({ basePath: resolve(import.meta.dirname, '../../workspace') }),
|
|
212
212
|
sandbox: new LocalSandbox({ workingDirectory: resolve(import.meta.dirname, '../../workspace') }),
|
|
213
|
-
skills: ['
|
|
213
|
+
skills: ['skills'],
|
|
214
214
|
bm25: true,
|
|
215
|
-
autoIndexPaths: ['
|
|
215
|
+
autoIndexPaths: ['docs', 'src'],
|
|
216
216
|
})
|
|
217
217
|
|
|
218
218
|
export const mastra = new Mastra({
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
# Building a Slack assistant
|
|
2
|
+
|
|
3
|
+
In this guide, you'll build a Mastra agent that responds to messages and mentions on Slack. You'll learn how to configure a channel adapter, set up a Slack app with the right permissions, connect it to your agent via a webhook, and test the interaction.
|
|
4
|
+
|
|
5
|
+
## Prerequisites
|
|
6
|
+
|
|
7
|
+
- Node.js `v22.13.0` or later installed
|
|
8
|
+
- An API key from a supported [Model Provider](https://mastra.ai/models)
|
|
9
|
+
- An existing Mastra project. Follow the [installation guide](https://mastra.ai/guides/getting-started/quickstart) if needed.
|
|
10
|
+
- A [Slack workspace](https://slack.com/) where you can create apps
|
|
11
|
+
|
|
12
|
+
## Create the agent
|
|
13
|
+
|
|
14
|
+
Install the Slack adapter:
|
|
15
|
+
|
|
16
|
+
**npm**:
|
|
17
|
+
|
|
18
|
+
```bash
|
|
19
|
+
npm install @chat-adapter/slack
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
**pnpm**:
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
pnpm add @chat-adapter/slack
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
**Yarn**:
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
yarn add @chat-adapter/slack
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
**Bun**:
|
|
35
|
+
|
|
36
|
+
```bash
|
|
37
|
+
bun add @chat-adapter/slack
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
Create a new file `src/mastra/agents/slack-agent.ts` and define your agent:
|
|
41
|
+
|
|
42
|
+
```ts
|
|
43
|
+
import { Agent } from '@mastra/core/agent'
|
|
44
|
+
import { createSlackAdapter } from '@chat-adapter/slack'
|
|
45
|
+
|
|
46
|
+
export const slackAgent = new Agent({
|
|
47
|
+
id: 'slack-agent',
|
|
48
|
+
name: 'Slack Agent',
|
|
49
|
+
instructions:
|
|
50
|
+
'You are a helpful assistant. Answer questions, help with tasks, and have natural conversations.',
|
|
51
|
+
model: 'anthropic/claude-opus-4-6',
|
|
52
|
+
channels: {
|
|
53
|
+
adapters: {
|
|
54
|
+
slack: createSlackAdapter(),
|
|
55
|
+
},
|
|
56
|
+
},
|
|
57
|
+
})
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
The `channels` property tells Mastra to generate a webhook endpoint for each adapter. In this case, the Slack adapter handles event verification, signature validation, and message formatting automatically.
|
|
61
|
+
|
|
62
|
+
Register the agent in your `src/mastra/index.ts` file:
|
|
63
|
+
|
|
64
|
+
```ts
|
|
65
|
+
import { Mastra } from '@mastra/core'
|
|
66
|
+
import { slackAgent } from './agents/slack-agent'
|
|
67
|
+
|
|
68
|
+
export const mastra = new Mastra({
|
|
69
|
+
agents: { slackAgent },
|
|
70
|
+
})
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Create a Slack app
|
|
74
|
+
|
|
75
|
+
You need a Slack app to connect your agent to a workspace.
|
|
76
|
+
|
|
77
|
+
Go to <https://api.slack.com/apps> and select **Create New App** > **From scratch**. Give it a name and select your workspace.
|
|
78
|
+
|
|
79
|
+
Navigate to **OAuth & Permissions** and scroll to **Bot Token Scopes**. Add the following scopes:
|
|
80
|
+
|
|
81
|
+
- `app_mentions:read`
|
|
82
|
+
- `channels:history`
|
|
83
|
+
- `channels:read`
|
|
84
|
+
- `chat:write`
|
|
85
|
+
- `users:read`
|
|
86
|
+
|
|
87
|
+
At the top of **OAuth & Permissions**, select **Install to Workspace**. Copy the **Bot User OAuth Token** (`xoxb-...`).
|
|
88
|
+
|
|
89
|
+
Go to **Basic Information** > **App Credentials** and copy the **Signing Secret** (e.g. `c3a4...`).
|
|
90
|
+
|
|
91
|
+
Ensure **Socket Mode** is turned **off** under **Settings** > **Socket Mode**.
|
|
92
|
+
|
|
93
|
+
Add the credentials to your `.env` file:
|
|
94
|
+
|
|
95
|
+
```bash
|
|
96
|
+
SLACK_SIGNING_SECRET=your-signing-secret
|
|
97
|
+
SLACK_BOT_TOKEN=xoxb-your-bot-token
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
The adapter reads these environment variables by default. See the [Chat SDK Slack adapter docs](https://chat-sdk.dev/adapters/slack) for more details.
|
|
101
|
+
|
|
102
|
+
## Connect the webhook
|
|
103
|
+
|
|
104
|
+
Slack delivers events to your agent via a webhook. Mastra generates this endpoint automatically for each channel adapter.
|
|
105
|
+
|
|
106
|
+
Start the dev server:
|
|
107
|
+
|
|
108
|
+
**npm**:
|
|
109
|
+
|
|
110
|
+
```bash
|
|
111
|
+
npm run dev
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
**pnpm**:
|
|
115
|
+
|
|
116
|
+
```bash
|
|
117
|
+
pnpm run dev
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
**Yarn**:
|
|
121
|
+
|
|
122
|
+
```bash
|
|
123
|
+
yarn dev
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
**Bun**:
|
|
127
|
+
|
|
128
|
+
```bash
|
|
129
|
+
bun run dev
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
During local development, Slack needs a public URL to reach your local server. Open a new terminal and start a tunnel:
|
|
133
|
+
|
|
134
|
+
```bash
|
|
135
|
+
npx cloudflared tunnel --url http://localhost:4111
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
This outputs a temporary public URL like `https://triple-arms-solutions-kit.trycloudflare.com`. The URL changes each time you restart the tunnel.
|
|
139
|
+
|
|
140
|
+
In your Slack app settings, go to **Event Subscriptions** and toggle **Enable Events** to **On**.
|
|
141
|
+
|
|
142
|
+
Set the **Request URL** to your tunnel URL with the agent webhook path:
|
|
143
|
+
|
|
144
|
+
```text
|
|
145
|
+
https://<your-tunnel-url>/api/agents/slack-agent/channels/slack/webhook
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
Slack sends a verification request. If your dev server is running, the request succeeds and Slack displays a green checkmark.
|
|
149
|
+
|
|
150
|
+
Under **Subscribe to bot events**, add:
|
|
151
|
+
|
|
152
|
+
- `app_mention`
|
|
153
|
+
- `message.channels`
|
|
154
|
+
|
|
155
|
+
Select **Save Changes**. Slack requires you to reinstall the app after changing event subscriptions. Go to **OAuth & Permissions** and select **Reinstall to Workspace** to apply the updated permissions.
|
|
156
|
+
|
|
157
|
+
> **Note:** The tunnel URL is for local development only. When you [deploy your application](https://mastra.ai/docs/deployment/overview), update the **Request URL** in your Slack app's **Event Subscriptions** to your production URL (e.g. `https://your-app.example.com/api/agents/slack-agent/channels/slack/webhook`).
|
|
158
|
+
|
|
159
|
+
## Test the agent
|
|
160
|
+
|
|
161
|
+
Before testing with Slack, you can refine your agent's behavior in [Studio](https://mastra.ai/docs/studio/overview). Open <http://localhost:4111/> to chat with your agent directly, adjust its instructions, and inspect traces to see how responses are generated.
|
|
162
|
+
|
|
163
|
+
Once you're happy with the agent's responses, test the Slack integration. Invite the bot to a channel in Slack:
|
|
164
|
+
|
|
165
|
+
```text
|
|
166
|
+
/invite @your-bot-name
|
|
167
|
+
```
|
|
168
|
+
|
|
169
|
+
Mention the bot in the channel:
|
|
170
|
+
|
|
171
|
+
```text
|
|
172
|
+
@your-bot-name What can you help me with?
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
The agent responds in the thread. Output may vary depending on the model and instructions.
|
|
176
|
+
|
|
177
|
+
## Next steps
|
|
178
|
+
|
|
179
|
+
You can extend this agent to:
|
|
180
|
+
|
|
181
|
+
- [Deploy your application](https://mastra.ai/docs/deployment/overview) and update the Slack **Request URL** to your production endpoint
|
|
182
|
+
- Add more adapters (Discord, Telegram) to the same agent so it responds on multiple platforms
|
|
183
|
+
- Configure [multimodal content](https://mastra.ai/docs/agents/channels) to let your agent process images, video, and audio shared in chat
|
|
184
|
+
- Add [tools](https://mastra.ai/docs/agents/using-tools) to give the agent access to external APIs and data
|
|
185
|
+
|
|
186
|
+
Learn more:
|
|
187
|
+
|
|
188
|
+
- [Channels overview](https://mastra.ai/docs/agents/channels)
|
|
189
|
+
- [Studio](https://mastra.ai/docs/studio/overview)
|
|
190
|
+
- [Deployment overview](https://mastra.ai/docs/deployment/overview)
|
|
191
|
+
- [Chat SDK adapter docs](https://chat-sdk.dev)
|
|
@@ -4,6 +4,30 @@ Azure OpenAI provides enterprise-grade access to OpenAI models through dedicated
|
|
|
4
4
|
|
|
5
5
|
Unlike other providers that have fixed model names, Azure uses **deployment names** that you configure in the Azure Portal.
|
|
6
6
|
|
|
7
|
+
## Usage
|
|
8
|
+
|
|
9
|
+
```typescript
|
|
10
|
+
import { Agent } from "@mastra/core/agent";
|
|
11
|
+
|
|
12
|
+
const agent = new Agent({
|
|
13
|
+
id: "my-agent",
|
|
14
|
+
name: "My Agent",
|
|
15
|
+
instructions: "You are a helpful assistant",
|
|
16
|
+
model: "azure-openai/my-gpt4-deployment" // Use your Azure deployment name (autocompleted in dev mode)
|
|
17
|
+
});
|
|
18
|
+
|
|
19
|
+
// Generate a response
|
|
20
|
+
const response = await agent.generate("Hello!");
|
|
21
|
+
|
|
22
|
+
// Stream a response
|
|
23
|
+
const stream = await agent.stream("Tell me a story");
|
|
24
|
+
for await (const chunk of stream) {
|
|
25
|
+
console.log(chunk);
|
|
26
|
+
}
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
Check [Azure OpenAI model availability](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models) for region-specific options.
|
|
30
|
+
|
|
7
31
|
## How Azure Deployments Work
|
|
8
32
|
|
|
9
33
|
Azure model IDs follow this pattern: `azure-openai/your-deployment-name`
|
|
@@ -101,28 +125,4 @@ export const mastra = new Mastra({
|
|
|
101
125
|
| `management.subscriptionId` | `string` | Yes\* | Azure subscription ID |
|
|
102
126
|
| `management.resourceGroup` | `string` | Yes\* | Resource group name |
|
|
103
127
|
|
|
104
|
-
\* Required if `management` is provided
|
|
105
|
-
|
|
106
|
-
## Usage
|
|
107
|
-
|
|
108
|
-
```typescript
|
|
109
|
-
import { Agent } from "@mastra/core/agent";
|
|
110
|
-
|
|
111
|
-
const agent = new Agent({
|
|
112
|
-
id: "my-agent",
|
|
113
|
-
name: "My Agent",
|
|
114
|
-
instructions: "You are a helpful assistant",
|
|
115
|
-
model: "azure-openai/my-gpt4-deployment" // Use your Azure deployment name (autocompleted in dev mode)
|
|
116
|
-
});
|
|
117
|
-
|
|
118
|
-
// Generate a response
|
|
119
|
-
const response = await agent.generate("Hello!");
|
|
120
|
-
|
|
121
|
-
// Stream a response
|
|
122
|
-
const stream = await agent.stream("Tell me a story");
|
|
123
|
-
for await (const chunk of stream) {
|
|
124
|
-
console.log(chunk);
|
|
125
|
-
}
|
|
126
|
-
```
|
|
127
|
-
|
|
128
|
-
Check [Azure OpenAI model availability](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models) for region-specific options.
|
|
128
|
+
\* Required if `management` is provided
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
# Mastra
|
|
2
|
+
|
|
3
|
+
The Mastra Memory Gateway is an OpenAI-compatible API proxy with built-in [Observational Memory](https://gateway.mastra.ai/docs/features#observational-memory). Point any HTTP client, SDK, or framework at the gateway and every conversation is automatically remembered without any memory management code.
|
|
4
|
+
|
|
5
|
+
Learn more in the [Memory Gateway documentation](https://gateway.mastra.ai/docs).
|
|
6
|
+
|
|
7
|
+
## Get an API key
|
|
8
|
+
|
|
9
|
+
Go to [gateway.mastra.ai](https://gateway.mastra.ai) and sign up for a Mastra account. During onboarding, you'll receive your personal API key for authenticating requests.
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
Define your API key as an environment variable:
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
MASTRA_GATEWAY_API_KEY=your-gateway-key
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
Set your gateway model ID:
|
|
20
|
+
|
|
21
|
+
```typescript
|
|
22
|
+
import { Agent } from "@mastra/core/agent";
|
|
23
|
+
|
|
24
|
+
const agent = new Agent({
|
|
25
|
+
id: "my-agent",
|
|
26
|
+
name: "My Agent",
|
|
27
|
+
instructions: "You are a helpful assistant",
|
|
28
|
+
model: "mastra/openai/gpt-5-mini"
|
|
29
|
+
});
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
Pass `memory.thread` and `memory.resource` when you generate/stream responses to enable Observational Memory:
|
|
33
|
+
|
|
34
|
+
```typescript
|
|
35
|
+
import { weatherAgent } from "./agents/weather-agent";
|
|
36
|
+
|
|
37
|
+
const memory = {
|
|
38
|
+
thread: "assistant-thread-1",
|
|
39
|
+
resource: "user-42",
|
|
40
|
+
};
|
|
41
|
+
|
|
42
|
+
const result = await weatherAgent.stream("My name is Alex and I prefer concise answers.", {
|
|
43
|
+
memory,
|
|
44
|
+
});
|
|
45
|
+
|
|
46
|
+
for await (const chunk of result.textStream) {
|
|
47
|
+
process.stdout.write(chunk);
|
|
48
|
+
}
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
## Configuration
|
|
52
|
+
|
|
53
|
+
```bash
|
|
54
|
+
# Use gateway API key
|
|
55
|
+
MASTRA_GATEWAY_API_KEY=your-gateway-key
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
## Learn more
|
|
59
|
+
|
|
60
|
+
- [Features](https://gateway.mastra.ai/docs/features)
|
|
61
|
+
- [Models](https://gateway.mastra.ai/docs/models)
|
|
62
|
+
- [Limits](https://gateway.mastra.ai/docs/limits)
|
|
63
|
+
- [API Reference](https://gateway.mastra.ai/docs/api/overview)
|
|
64
|
+
- [Examples](https://gateway.mastra.ai/docs/examples/)
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Netlify
|
|
2
2
|
|
|
3
|
-
Netlify AI Gateway provides unified access to multiple providers with built-in caching and observability. Access
|
|
3
|
+
Netlify AI Gateway provides unified access to multiple providers with built-in caching and observability. Access 66 models through Mastra's model router.
|
|
4
4
|
|
|
5
5
|
Learn more in the [Netlify documentation](https://docs.netlify.com/build/ai-gateway/overview/).
|
|
6
6
|
|
|
@@ -94,6 +94,10 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
94
94
|
| `openai/gpt-5.4-nano-2026-03-17` |
|
|
95
95
|
| `openai/gpt-5.4-pro` |
|
|
96
96
|
| `openai/gpt-5.4-pro-2026-03-05` |
|
|
97
|
+
| `openai/gpt-5.5` |
|
|
98
|
+
| `openai/gpt-5.5-2026-04-23` |
|
|
99
|
+
| `openai/gpt-5.5-pro` |
|
|
100
|
+
| `openai/gpt-5.5-pro-2026-04-23` |
|
|
97
101
|
| `openai/o3` |
|
|
98
102
|
| `openai/o3-mini` |
|
|
99
103
|
| `openai/o4-mini` |
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# OpenRouter
|
|
2
2
|
|
|
3
|
-
OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access
|
|
3
|
+
OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 179 models through Mastra's model router.
|
|
4
4
|
|
|
5
5
|
Learn more in the [OpenRouter documentation](https://openrouter.ai/models).
|
|
6
6
|
|
|
@@ -61,6 +61,8 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
61
61
|
| `deepseek/deepseek-v3.1-terminus:exacto` |
|
|
62
62
|
| `deepseek/deepseek-v3.2` |
|
|
63
63
|
| `deepseek/deepseek-v3.2-speciale` |
|
|
64
|
+
| `deepseek/deepseek-v4-flash` |
|
|
65
|
+
| `deepseek/deepseek-v4-pro` |
|
|
64
66
|
| `google/gemini-2.0-flash-001` |
|
|
65
67
|
| `google/gemini-2.5-flash` |
|
|
66
68
|
| `google/gemini-2.5-flash-lite` |
|
|
@@ -71,6 +73,7 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
71
73
|
| `google/gemini-2.5-pro-preview-06-05` |
|
|
72
74
|
| `google/gemini-3-flash-preview` |
|
|
73
75
|
| `google/gemini-3-pro-preview` |
|
|
76
|
+
| `google/gemini-3.1-flash-image-preview` |
|
|
74
77
|
| `google/gemini-3.1-flash-lite-preview` |
|
|
75
78
|
| `google/gemini-3.1-pro-preview` |
|
|
76
79
|
| `google/gemini-3.1-pro-preview-customtools` |
|
|
@@ -151,6 +154,7 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
151
154
|
| `openai/gpt-5.4-mini` |
|
|
152
155
|
| `openai/gpt-5.4-nano` |
|
|
153
156
|
| `openai/gpt-5.4-pro` |
|
|
157
|
+
| `openai/gpt-5.5` |
|
|
154
158
|
| `openai/gpt-oss-120b` |
|
|
155
159
|
| `openai/gpt-oss-120b:exacto` |
|
|
156
160
|
| `openai/gpt-oss-120b:free` |
|
|
@@ -160,6 +164,7 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
160
164
|
| `openai/o4-mini` |
|
|
161
165
|
| `openrouter/elephant-alpha` |
|
|
162
166
|
| `openrouter/free` |
|
|
167
|
+
| `openrouter/pareto-code` |
|
|
163
168
|
| `prime-intellect/intellect-3` |
|
|
164
169
|
| `qwen/qwen-2.5-coder-32b-instruct` |
|
|
165
170
|
| `qwen/qwen2.5-vl-72b-instruct` |
|
|
@@ -195,6 +200,8 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
195
200
|
| `xiaomi/mimo-v2-flash` |
|
|
196
201
|
| `xiaomi/mimo-v2-omni` |
|
|
197
202
|
| `xiaomi/mimo-v2-pro` |
|
|
203
|
+
| `xiaomi/mimo-v2.5` |
|
|
204
|
+
| `xiaomi/mimo-v2.5-pro` |
|
|
198
205
|
| `z-ai/glm-4.5` |
|
|
199
206
|
| `z-ai/glm-4.5-air` |
|
|
200
207
|
| `z-ai/glm-4.5-air:free` |
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Vercel
|
|
2
2
|
|
|
3
|
-
Vercel aggregates models from multiple providers with enhanced features like rate limiting and failover. Access
|
|
3
|
+
Vercel aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 240 models through Mastra's model router.
|
|
4
4
|
|
|
5
5
|
Learn more in the [Vercel documentation](https://ai-sdk.dev/providers/ai-sdk-providers).
|
|
6
6
|
|
|
@@ -38,6 +38,7 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
38
38
|
| `alibaba/qwen-3-235b` |
|
|
39
39
|
| `alibaba/qwen-3-30b` |
|
|
40
40
|
| `alibaba/qwen-3-32b` |
|
|
41
|
+
| `alibaba/qwen-3.6-max-preview` |
|
|
41
42
|
| `alibaba/qwen3-235b-a22b-thinking` |
|
|
42
43
|
| `alibaba/qwen3-coder` |
|
|
43
44
|
| `alibaba/qwen3-coder-30b-a3b` |
|
|
@@ -95,6 +96,8 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
95
96
|
| `deepseek/deepseek-v3.2` |
|
|
96
97
|
| `deepseek/deepseek-v3.2-exp` |
|
|
97
98
|
| `deepseek/deepseek-v3.2-thinking` |
|
|
99
|
+
| `deepseek/deepseek-v4-flash` |
|
|
100
|
+
| `deepseek/deepseek-v4-pro` |
|
|
98
101
|
| `google/gemini-2.0-flash` |
|
|
99
102
|
| `google/gemini-2.0-flash-lite` |
|
|
100
103
|
| `google/gemini-2.5-flash` |
|
|
@@ -166,6 +169,7 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
166
169
|
| `moonshotai/kimi-k2-thinking-turbo` |
|
|
167
170
|
| `moonshotai/kimi-k2-turbo` |
|
|
168
171
|
| `moonshotai/kimi-k2.5` |
|
|
172
|
+
| `moonshotai/kimi-k2.6` |
|
|
169
173
|
| `morph/morph-v3-fast` |
|
|
170
174
|
| `morph/morph-v3-large` |
|
|
171
175
|
| `nvidia/nemotron-3-nano-30b-a3b` |
|
|
@@ -203,6 +207,8 @@ ANTHROPIC_API_KEY=ant-...
|
|
|
203
207
|
| `openai/gpt-5.4-mini` |
|
|
204
208
|
| `openai/gpt-5.4-nano` |
|
|
205
209
|
| `openai/gpt-5.4-pro` |
|
|
210
|
+
| `openai/gpt-5.5` |
|
|
211
|
+
| `openai/gpt-5.5-pro` |
|
|
206
212
|
| `openai/gpt-oss-120b` |
|
|
207
213
|
| `openai/gpt-oss-20b` |
|
|
208
214
|
| `openai/gpt-oss-safeguard-20b` |
|
package/.docs/models/gateways.md
CHANGED
|
@@ -9,6 +9,7 @@ Create custom gateways for private LLM deployments or specialized provider integ
|
|
|
9
9
|
## Built-in gateways
|
|
10
10
|
|
|
11
11
|
- [Azure OpenAI](https://mastra.ai/models/gateways/azure-openai)
|
|
12
|
+
- [Mastra](https://mastra.ai/models/gateways/mastra)
|
|
12
13
|
- [Netlify](https://mastra.ai/models/gateways/netlify)
|
|
13
14
|
- [OpenRouter](https://mastra.ai/models/gateways/openrouter)
|
|
14
15
|
- [Vercel](https://mastra.ai/models/gateways/vercel)
|
package/.docs/models/index.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Model Providers
|
|
2
2
|
|
|
3
|
-
Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to
|
|
3
|
+
Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3757 models from 106 providers through a single API.
|
|
4
4
|
|
|
5
5
|
## Features
|
|
6
6
|
|
|
@@ -27,7 +27,7 @@ const agent = new Agent({
|
|
|
27
27
|
id: "my-agent",
|
|
28
28
|
name: "My Agent",
|
|
29
29
|
instructions: "You are a helpful assistant",
|
|
30
|
-
model: "openai/gpt-5"
|
|
30
|
+
model: "openai/gpt-5.5"
|
|
31
31
|
})
|
|
32
32
|
```
|
|
33
33
|
|
|
@@ -40,7 +40,7 @@ const agent = new Agent({
|
|
|
40
40
|
id: "my-agent",
|
|
41
41
|
name: "My Agent",
|
|
42
42
|
instructions: "You are a helpful assistant",
|
|
43
|
-
model: "anthropic/claude-4-
|
|
43
|
+
model: "anthropic/claude-sonnet-4-6"
|
|
44
44
|
})
|
|
45
45
|
```
|
|
46
46
|
|
|
@@ -79,7 +79,7 @@ const agent = new Agent({
|
|
|
79
79
|
id: "my-agent",
|
|
80
80
|
name: "My Agent",
|
|
81
81
|
instructions: "You are a helpful assistant",
|
|
82
|
-
model: "openrouter/anthropic/claude-haiku-4
|
|
82
|
+
model: "openrouter/anthropic/claude-haiku-4.5"
|
|
83
83
|
})
|
|
84
84
|
```
|
|
85
85
|
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
# abliteration.ai
|
|
2
|
+
|
|
3
|
+
Access 1 abliteration.ai model through Mastra's model router. Authentication is handled automatically using the `ABLIT_KEY` environment variable.
|
|
4
|
+
|
|
5
|
+
Learn more in the [abliteration.ai documentation](https://docs.abliteration.ai/models).
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
ABLIT_KEY=your-api-key
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```typescript
|
|
12
|
+
import { Agent } from "@mastra/core/agent";
|
|
13
|
+
|
|
14
|
+
const agent = new Agent({
|
|
15
|
+
id: "my-agent",
|
|
16
|
+
name: "My Agent",
|
|
17
|
+
instructions: "You are a helpful assistant",
|
|
18
|
+
model: "abliteration-ai/abliterated-model"
|
|
19
|
+
});
|
|
20
|
+
|
|
21
|
+
// Generate a response
|
|
22
|
+
const response = await agent.generate("Hello!");
|
|
23
|
+
|
|
24
|
+
// Stream a response
|
|
25
|
+
const stream = await agent.stream("Tell me a story");
|
|
26
|
+
for await (const chunk of stream) {
|
|
27
|
+
console.log(chunk);
|
|
28
|
+
}
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
> **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [abliteration.ai documentation](https://docs.abliteration.ai/models) for details.
|
|
32
|
+
|
|
33
|
+
## Models
|
|
34
|
+
|
|
35
|
+
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
36
|
+
| ----------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
37
|
+
| `abliteration-ai/abliterated-model` | 150K | | | | | | $3 | $3 |
|
|
38
|
+
|
|
39
|
+
## Advanced configuration
|
|
40
|
+
|
|
41
|
+
### Custom headers
|
|
42
|
+
|
|
43
|
+
```typescript
|
|
44
|
+
const agent = new Agent({
|
|
45
|
+
id: "custom-agent",
|
|
46
|
+
name: "custom-agent",
|
|
47
|
+
model: {
|
|
48
|
+
url: "https://api.abliteration.ai/v1",
|
|
49
|
+
id: "abliteration-ai/abliterated-model",
|
|
50
|
+
apiKey: process.env.ABLIT_KEY,
|
|
51
|
+
headers: {
|
|
52
|
+
"X-Custom-Header": "value"
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
});
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
### Dynamic model selection
|
|
59
|
+
|
|
60
|
+
```typescript
|
|
61
|
+
const agent = new Agent({
|
|
62
|
+
id: "dynamic-agent",
|
|
63
|
+
name: "Dynamic Agent",
|
|
64
|
+
model: ({ requestContext }) => {
|
|
65
|
+
const useAdvanced = requestContext.task === "complex";
|
|
66
|
+
return useAdvanced
|
|
67
|
+
? "abliteration-ai/abliterated-model"
|
|
68
|
+
: "abliteration-ai/abliterated-model";
|
|
69
|
+
}
|
|
70
|
+
});
|
|
71
|
+
```
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Alibaba (China)
|
|
2
2
|
|
|
3
|
-
Access
|
|
3
|
+
Access 80 Alibaba (China) models through Mastra's model router. Authentication is handled automatically using the `DASHSCOPE_API_KEY` environment variable.
|
|
4
4
|
|
|
5
5
|
Learn more in the [Alibaba (China) documentation](https://www.alibabacloud.com/help/en/model-studio/models).
|
|
6
6
|
|
|
@@ -45,6 +45,8 @@ for await (const chunk of stream) {
|
|
|
45
45
|
| `alibaba-cn/deepseek-v3` | 66K | | | | | | $0.29 | $1 |
|
|
46
46
|
| `alibaba-cn/deepseek-v3-1` | 131K | | | | | | $0.57 | $2 |
|
|
47
47
|
| `alibaba-cn/deepseek-v3-2-exp` | 131K | | | | | | $0.29 | $0.43 |
|
|
48
|
+
| `alibaba-cn/deepseek-v4-flash` | 1.0M | | | | | | $0.14 | $0.28 |
|
|
49
|
+
| `alibaba-cn/deepseek-v4-pro` | 1.0M | | | | | | $2 | $3 |
|
|
48
50
|
| `alibaba-cn/glm-5` | 203K | | | | | | $0.86 | $3 |
|
|
49
51
|
| `alibaba-cn/glm-5.1` | 203K | | | | | | $0.87 | $3 |
|
|
50
52
|
| `alibaba-cn/kimi-k2-thinking` | 262K | | | | | | $0.57 | $2 |
|
|
@@ -103,6 +105,7 @@ for await (const chunk of stream) {
|
|
|
103
105
|
| `alibaba-cn/qwen3.5-397b-a17b` | 262K | | | | | | $0.43 | $3 |
|
|
104
106
|
| `alibaba-cn/qwen3.5-flash` | 1.0M | | | | | | $0.17 | $2 |
|
|
105
107
|
| `alibaba-cn/qwen3.5-plus` | 1.0M | | | | | | $0.57 | $3 |
|
|
108
|
+
| `alibaba-cn/qwen3.6-max-preview` | 246K | | | | | | $1 | $8 |
|
|
106
109
|
| `alibaba-cn/qwen3.6-plus` | 1.0M | | | | | | $0.28 | $2 |
|
|
107
110
|
| `alibaba-cn/qwq-32b` | 131K | | | | | | $0.29 | $0.86 |
|
|
108
111
|
| `alibaba-cn/qwq-plus` | 131K | | | | | | $0.23 | $0.57 |
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Alibaba
|
|
2
2
|
|
|
3
|
-
Access
|
|
3
|
+
Access 47 Alibaba models through Mastra's model router. Authentication is handled automatically using the `DASHSCOPE_API_KEY` environment variable.
|
|
4
4
|
|
|
5
5
|
Learn more in the [Alibaba documentation](https://www.alibabacloud.com/help/en/model-studio/models).
|
|
6
6
|
|
|
@@ -72,8 +72,13 @@ for await (const chunk of stream) {
|
|
|
72
72
|
| `alibaba/qwen3-vl-235b-a22b` | 131K | | | | | | $0.70 | $3 |
|
|
73
73
|
| `alibaba/qwen3-vl-30b-a3b` | 131K | | | | | | $0.20 | $0.80 |
|
|
74
74
|
| `alibaba/qwen3-vl-plus` | 262K | | | | | | $0.20 | $2 |
|
|
75
|
+
| `alibaba/qwen3.5-122b-a10b` | 262K | | | | | | $0.40 | $3 |
|
|
76
|
+
| `alibaba/qwen3.5-27b` | 262K | | | | | | $0.30 | $2 |
|
|
77
|
+
| `alibaba/qwen3.5-35b-a3b` | 262K | | | | | | $0.25 | $2 |
|
|
75
78
|
| `alibaba/qwen3.5-397b-a17b` | 262K | | | | | | $0.60 | $4 |
|
|
76
79
|
| `alibaba/qwen3.5-plus` | 1.0M | | | | | | $0.40 | $2 |
|
|
80
|
+
| `alibaba/qwen3.6-27b` | 262K | | | | | | $0.60 | $4 |
|
|
81
|
+
| `alibaba/qwen3.6-35b-a3b` | 262K | | | | | | $0.25 | $1 |
|
|
77
82
|
| `alibaba/qwen3.6-plus` | 1.0M | | | | | | $0.28 | $2 |
|
|
78
83
|
| `alibaba/qwq-plus` | 131K | | | | | | $0.80 | $2 |
|
|
79
84
|
|