@mastra/mcp-docs-server 1.1.6 → 1.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/course/02-agent-tools-mcp/07-what-is-zapier-mcp.md +10 -1
- package/.docs/course/02-agent-tools-mcp/08-getting-zapier-mcp-url.md +14 -9
- package/.docs/course/02-agent-tools-mcp/09-updating-mcp-config-zapier.md +13 -1
- package/.docs/course/02-agent-tools-mcp/12-troubleshooting-zapier.md +10 -8
- package/.docs/course/02-agent-tools-mcp/13-what-is-github-mcp.md +1 -1
- package/.docs/course/02-agent-tools-mcp/14-getting-github-mcp-url.md +28 -20
- package/.docs/course/02-agent-tools-mcp/15-updating-mcp-config-github.md +18 -2
- package/.docs/course/02-agent-tools-mcp/18-troubleshooting-github.md +5 -5
- package/.docs/course/02-agent-tools-mcp/20-updating-mcp-config-hackernews.md +11 -1
- package/.docs/course/02-agent-tools-mcp/26-updating-mcp-config-filesystem.md +11 -1
- package/.docs/course/02-agent-tools-mcp/32-conclusion.md +1 -1
- package/.docs/docs/agents/using-tools.md +34 -0
- package/.docs/docs/deployment/studio.md +8 -0
- package/.docs/docs/memory/observational-memory.md +3 -5
- package/.docs/docs/server/auth/better-auth.md +23 -6
- package/.docs/docs/workspace/sandbox.md +2 -0
- package/.docs/guides/deployment/vercel.md +19 -0
- package/.docs/guides/index.md +20 -1
- package/.docs/models/gateways/netlify.md +11 -6
- package/.docs/models/gateways/openrouter.md +4 -1
- package/.docs/models/gateways/vercel.md +13 -3
- package/.docs/models/index.md +1 -1
- package/.docs/models/providers/aihubmix.md +7 -1
- package/.docs/models/providers/anthropic.md +3 -2
- package/.docs/models/providers/baseten.md +7 -5
- package/.docs/models/providers/chutes.md +2 -1
- package/.docs/models/providers/cloudferro-sherlock.md +74 -0
- package/.docs/models/providers/evroc.md +83 -0
- package/.docs/models/providers/fireworks-ai.md +20 -26
- package/.docs/models/providers/firmware.md +2 -1
- package/.docs/models/providers/friendli.md +5 -6
- package/.docs/models/providers/google.md +3 -1
- package/.docs/models/providers/meganova.md +89 -0
- package/.docs/models/providers/opencode-go.md +73 -0
- package/.docs/models/providers/opencode.md +33 -33
- package/.docs/models/providers/perplexity-agent.md +113 -0
- package/.docs/models/providers/perplexity.md +2 -1
- package/.docs/models/providers/poe.md +2 -1
- package/.docs/models/providers/qihang-ai.md +79 -0
- package/.docs/models/providers/qiniu-ai.md +146 -0
- package/.docs/models/providers/siliconflow-cn.md +5 -1
- package/.docs/models/providers/togetherai.md +2 -1
- package/.docs/models/providers/zenmux.md +5 -1
- package/.docs/models/providers.md +7 -0
- package/.docs/reference/agents/network.md +38 -1
- package/.docs/reference/ai-sdk/with-mastra.md +5 -1
- package/.docs/reference/deployer/vercel.md +28 -3
- package/.docs/reference/harness/harness-class.md +58 -6
- package/.docs/reference/index.md +1 -1
- package/.docs/reference/memory/cloneThread.md +13 -1
- package/.docs/reference/memory/observational-memory.md +4 -2
- package/.docs/reference/streaming/agents/stream.md +34 -0
- package/.docs/reference/tools/create-tool.md +48 -0
- package/.docs/reference/workspace/daytona-sandbox.md +580 -0
- package/.docs/reference/workspace/s3-filesystem.md +2 -0
- package/CHANGELOG.md +16 -0
- package/LICENSE.md +15 -0
- package/package.json +6 -6
- package/.docs/docs/mastra-code/configuration.md +0 -299
- package/.docs/docs/mastra-code/customization.md +0 -228
- package/.docs/docs/mastra-code/modes.md +0 -104
- package/.docs/docs/mastra-code/overview.md +0 -135
- package/.docs/docs/mastra-code/tools.md +0 -229
- package/.docs/reference/mastra-code/createMastraCode.md +0 -108
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
# OpenCode Go
|
|
2
|
+
|
|
3
|
+
Access 3 OpenCode Go models through Mastra's model router. Authentication is handled automatically using the `OPENCODE_API_KEY` environment variable.
|
|
4
|
+
|
|
5
|
+
Learn more in the [OpenCode Go documentation](https://opencode.ai/docs/zen).
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
OPENCODE_API_KEY=your-api-key
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```typescript
|
|
12
|
+
import { Agent } from "@mastra/core/agent";
|
|
13
|
+
|
|
14
|
+
const agent = new Agent({
|
|
15
|
+
id: "my-agent",
|
|
16
|
+
name: "My Agent",
|
|
17
|
+
instructions: "You are a helpful assistant",
|
|
18
|
+
model: "opencode-go/glm-5"
|
|
19
|
+
});
|
|
20
|
+
|
|
21
|
+
// Generate a response
|
|
22
|
+
const response = await agent.generate("Hello!");
|
|
23
|
+
|
|
24
|
+
// Stream a response
|
|
25
|
+
const stream = await agent.stream("Tell me a story");
|
|
26
|
+
for await (const chunk of stream) {
|
|
27
|
+
console.log(chunk);
|
|
28
|
+
}
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
> **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [OpenCode Go documentation](https://opencode.ai/docs/zen) for details.
|
|
32
|
+
|
|
33
|
+
## Models
|
|
34
|
+
|
|
35
|
+
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
36
|
+
| -------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
37
|
+
| `opencode-go/glm-5` | 205K | | | | | | $1 | $3 |
|
|
38
|
+
| `opencode-go/kimi-k2.5` | 262K | | | | | | $0.60 | $3 |
|
|
39
|
+
| `opencode-go/minimax-m2.5` | 205K | | | | | | $0.30 | $1 |
|
|
40
|
+
|
|
41
|
+
## Advanced Configuration
|
|
42
|
+
|
|
43
|
+
### Custom Headers
|
|
44
|
+
|
|
45
|
+
```typescript
|
|
46
|
+
const agent = new Agent({
|
|
47
|
+
id: "custom-agent",
|
|
48
|
+
name: "custom-agent",
|
|
49
|
+
model: {
|
|
50
|
+
url: "https://opencode.ai/zen/go/v1",
|
|
51
|
+
id: "opencode-go/glm-5",
|
|
52
|
+
apiKey: process.env.OPENCODE_API_KEY,
|
|
53
|
+
headers: {
|
|
54
|
+
"X-Custom-Header": "value"
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
});
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
### Dynamic Model Selection
|
|
61
|
+
|
|
62
|
+
```typescript
|
|
63
|
+
const agent = new Agent({
|
|
64
|
+
id: "dynamic-agent",
|
|
65
|
+
name: "Dynamic Agent",
|
|
66
|
+
model: ({ requestContext }) => {
|
|
67
|
+
const useAdvanced = requestContext.task === "complex";
|
|
68
|
+
return useAdvanced
|
|
69
|
+
? "opencode-go/minimax-m2.5"
|
|
70
|
+
: "opencode-go/glm-5";
|
|
71
|
+
}
|
|
72
|
+
});
|
|
73
|
+
```
|
|
@@ -32,38 +32,38 @@ for await (const chunk of stream) {
|
|
|
32
32
|
|
|
33
33
|
## Models
|
|
34
34
|
|
|
35
|
-
| Model
|
|
36
|
-
|
|
|
37
|
-
| `opencode/big-pickle`
|
|
38
|
-
| `opencode/claude-3-5-haiku`
|
|
39
|
-
| `opencode/claude-haiku-4-5`
|
|
40
|
-
| `opencode/claude-opus-4-1`
|
|
41
|
-
| `opencode/claude-opus-4-5`
|
|
42
|
-
| `opencode/claude-opus-4-6`
|
|
43
|
-
| `opencode/claude-sonnet-4`
|
|
44
|
-
| `opencode/claude-sonnet-4-5`
|
|
45
|
-
| `opencode/
|
|
46
|
-
| `opencode/gemini-3-
|
|
47
|
-
| `opencode/
|
|
48
|
-
| `opencode/
|
|
49
|
-
| `opencode/glm-
|
|
50
|
-
| `opencode/glm-
|
|
51
|
-
| `opencode/
|
|
52
|
-
| `opencode/gpt-5
|
|
53
|
-
| `opencode/gpt-5-
|
|
54
|
-
| `opencode/gpt-5
|
|
55
|
-
| `opencode/gpt-5.1
|
|
56
|
-
| `opencode/gpt-5.1-codex
|
|
57
|
-
| `opencode/gpt-5.1-codex-
|
|
58
|
-
| `opencode/gpt-5.
|
|
59
|
-
| `opencode/gpt-5.2
|
|
60
|
-
| `opencode/
|
|
61
|
-
| `opencode/
|
|
62
|
-
| `opencode/kimi-k2.5`
|
|
63
|
-
| `opencode/
|
|
64
|
-
| `opencode/minimax-m2.
|
|
65
|
-
| `opencode/minimax-m2.5`
|
|
66
|
-
| `opencode/
|
|
35
|
+
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
36
|
+
| ------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
37
|
+
| `opencode/big-pickle` | 200K | | | | | | — | — |
|
|
38
|
+
| `opencode/claude-3-5-haiku` | 200K | | | | | | $0.80 | $4 |
|
|
39
|
+
| `opencode/claude-haiku-4-5` | 200K | | | | | | $1 | $5 |
|
|
40
|
+
| `opencode/claude-opus-4-1` | 200K | | | | | | $15 | $75 |
|
|
41
|
+
| `opencode/claude-opus-4-5` | 200K | | | | | | $5 | $25 |
|
|
42
|
+
| `opencode/claude-opus-4-6` | 1.0M | | | | | | $5 | $25 |
|
|
43
|
+
| `opencode/claude-sonnet-4` | 1.0M | | | | | | $3 | $15 |
|
|
44
|
+
| `opencode/claude-sonnet-4-5` | 1.0M | | | | | | $3 | $15 |
|
|
45
|
+
| `opencode/claude-sonnet-4-6` | 1.0M | | | | | | $3 | $15 |
|
|
46
|
+
| `opencode/gemini-3-flash` | 1.0M | | | | | | $0.50 | $3 |
|
|
47
|
+
| `opencode/gemini-3-pro` | 1.0M | | | | | | $2 | $12 |
|
|
48
|
+
| `opencode/gemini-3.1-pro` | 1.0M | | | | | | $2 | $12 |
|
|
49
|
+
| `opencode/glm-4.6` | 205K | | | | | | $0.60 | $2 |
|
|
50
|
+
| `opencode/glm-4.7` | 205K | | | | | | $0.60 | $2 |
|
|
51
|
+
| `opencode/glm-5` | 205K | | | | | | $1 | $3 |
|
|
52
|
+
| `opencode/gpt-5` | 400K | | | | | | $1 | $9 |
|
|
53
|
+
| `opencode/gpt-5-codex` | 400K | | | | | | $1 | $9 |
|
|
54
|
+
| `opencode/gpt-5-nano` | 400K | | | | | | — | — |
|
|
55
|
+
| `opencode/gpt-5.1` | 400K | | | | | | $1 | $9 |
|
|
56
|
+
| `opencode/gpt-5.1-codex` | 400K | | | | | | $1 | $9 |
|
|
57
|
+
| `opencode/gpt-5.1-codex-max` | 400K | | | | | | $1 | $10 |
|
|
58
|
+
| `opencode/gpt-5.1-codex-mini` | 400K | | | | | | $0.25 | $2 |
|
|
59
|
+
| `opencode/gpt-5.2` | 400K | | | | | | $2 | $14 |
|
|
60
|
+
| `opencode/gpt-5.2-codex` | 400K | | | | | | $2 | $14 |
|
|
61
|
+
| `opencode/gpt-5.3-codex` | 400K | | | | | | $2 | $14 |
|
|
62
|
+
| `opencode/kimi-k2.5` | 262K | | | | | | $0.60 | $3 |
|
|
63
|
+
| `opencode/minimax-m2.1` | 205K | | | | | | $0.30 | $1 |
|
|
64
|
+
| `opencode/minimax-m2.5` | 205K | | | | | | $0.30 | $1 |
|
|
65
|
+
| `opencode/minimax-m2.5-free` | 205K | | | | | | — | — |
|
|
66
|
+
| `opencode/trinity-large-preview-free` | 131K | | | | | | — | — |
|
|
67
67
|
|
|
68
68
|
## Advanced Configuration
|
|
69
69
|
|
|
@@ -93,7 +93,7 @@ const agent = new Agent({
|
|
|
93
93
|
model: ({ requestContext }) => {
|
|
94
94
|
const useAdvanced = requestContext.task === "complex";
|
|
95
95
|
return useAdvanced
|
|
96
|
-
? "opencode/
|
|
96
|
+
? "opencode/trinity-large-preview-free"
|
|
97
97
|
: "opencode/big-pickle";
|
|
98
98
|
}
|
|
99
99
|
});
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
# Perplexity Agent
|
|
2
|
+
|
|
3
|
+
Access 15 Perplexity Agent models through Mastra's model router. Authentication is handled automatically using the `PERPLEXITY_API_KEY` environment variable.
|
|
4
|
+
|
|
5
|
+
Learn more in the [Perplexity Agent documentation](https://docs.perplexity.ai/docs/agent-api/models).
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
PERPLEXITY_API_KEY=your-api-key
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```typescript
|
|
12
|
+
import { Agent } from "@mastra/core/agent";
|
|
13
|
+
|
|
14
|
+
const agent = new Agent({
|
|
15
|
+
id: "my-agent",
|
|
16
|
+
name: "My Agent",
|
|
17
|
+
instructions: "You are a helpful assistant",
|
|
18
|
+
model: "perplexity-agent/anthropic/claude-haiku-4-5"
|
|
19
|
+
});
|
|
20
|
+
|
|
21
|
+
// Generate a response
|
|
22
|
+
const response = await agent.generate("Hello!");
|
|
23
|
+
|
|
24
|
+
// Stream a response
|
|
25
|
+
const stream = await agent.stream("Tell me a story");
|
|
26
|
+
for await (const chunk of stream) {
|
|
27
|
+
console.log(chunk);
|
|
28
|
+
}
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
> **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [Perplexity Agent documentation](https://docs.perplexity.ai/docs/agent-api/models) for details.
|
|
32
|
+
|
|
33
|
+
## Models
|
|
34
|
+
|
|
35
|
+
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
36
|
+
| -------------------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
37
|
+
| `perplexity-agent/anthropic/claude-haiku-4-5` | 200K | | | | | | $1 | $5 |
|
|
38
|
+
| `perplexity-agent/anthropic/claude-opus-4-5` | 200K | | | | | | $5 | $25 |
|
|
39
|
+
| `perplexity-agent/anthropic/claude-opus-4-6` | 200K | | | | | | $5 | $25 |
|
|
40
|
+
| `perplexity-agent/anthropic/claude-sonnet-4-5` | 200K | | | | | | $3 | $15 |
|
|
41
|
+
| `perplexity-agent/anthropic/claude-sonnet-4-6` | 200K | | | | | | $3 | $15 |
|
|
42
|
+
| `perplexity-agent/google/gemini-2.5-flash` | 1.0M | | | | | | $0.30 | $3 |
|
|
43
|
+
| `perplexity-agent/google/gemini-2.5-pro` | 1.0M | | | | | | $1 | $10 |
|
|
44
|
+
| `perplexity-agent/google/gemini-3-flash-preview` | 1.0M | | | | | | $0.50 | $3 |
|
|
45
|
+
| `perplexity-agent/google/gemini-3-pro-preview` | 1.0M | | | | | | $2 | $12 |
|
|
46
|
+
| `perplexity-agent/google/gemini-3.1-pro-preview` | 1.0M | | | | | | $2 | $12 |
|
|
47
|
+
| `perplexity-agent/openai/gpt-5-mini` | 400K | | | | | | $0.25 | $2 |
|
|
48
|
+
| `perplexity-agent/openai/gpt-5.1` | 400K | | | | | | $1 | $10 |
|
|
49
|
+
| `perplexity-agent/openai/gpt-5.2` | 400K | | | | | | $2 | $14 |
|
|
50
|
+
| `perplexity-agent/perplexity/sonar` | 128K | | | | | | $0.25 | $3 |
|
|
51
|
+
| `perplexity-agent/xai/grok-4-1-fast-non-reasoning` | 2.0M | | | | | | $0.20 | $0.50 |
|
|
52
|
+
|
|
53
|
+
## Advanced Configuration
|
|
54
|
+
|
|
55
|
+
### Custom Headers
|
|
56
|
+
|
|
57
|
+
```typescript
|
|
58
|
+
const agent = new Agent({
|
|
59
|
+
id: "custom-agent",
|
|
60
|
+
name: "custom-agent",
|
|
61
|
+
model: {
|
|
62
|
+
url: "https://api.perplexity.ai/v1",
|
|
63
|
+
id: "perplexity-agent/anthropic/claude-haiku-4-5",
|
|
64
|
+
apiKey: process.env.PERPLEXITY_API_KEY,
|
|
65
|
+
headers: {
|
|
66
|
+
"X-Custom-Header": "value"
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
});
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
### Dynamic Model Selection
|
|
73
|
+
|
|
74
|
+
```typescript
|
|
75
|
+
const agent = new Agent({
|
|
76
|
+
id: "dynamic-agent",
|
|
77
|
+
name: "Dynamic Agent",
|
|
78
|
+
model: ({ requestContext }) => {
|
|
79
|
+
const useAdvanced = requestContext.task === "complex";
|
|
80
|
+
return useAdvanced
|
|
81
|
+
? "perplexity-agent/xai/grok-4-1-fast-non-reasoning"
|
|
82
|
+
: "perplexity-agent/anthropic/claude-haiku-4-5";
|
|
83
|
+
}
|
|
84
|
+
});
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
## Direct Provider Installation
|
|
88
|
+
|
|
89
|
+
This provider can also be installed directly as a standalone package, which can be used instead of the Mastra model router string. View the [package documentation](https://www.npmjs.com/package/@ai-sdk/openai) for more details.
|
|
90
|
+
|
|
91
|
+
**npm**:
|
|
92
|
+
|
|
93
|
+
```bash
|
|
94
|
+
npm install @ai-sdk/openai
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
**pnpm**:
|
|
98
|
+
|
|
99
|
+
```bash
|
|
100
|
+
pnpm add @ai-sdk/openai
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
**Yarn**:
|
|
104
|
+
|
|
105
|
+
```bash
|
|
106
|
+
yarn add @ai-sdk/openai
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
**Bun**:
|
|
110
|
+
|
|
111
|
+
```bash
|
|
112
|
+
bun add @ai-sdk/openai
|
|
113
|
+
```
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Perplexity
|
|
2
2
|
|
|
3
|
-
Access
|
|
3
|
+
Access 4 Perplexity models through Mastra's model router. Authentication is handled automatically using the `PERPLEXITY_API_KEY` environment variable.
|
|
4
4
|
|
|
5
5
|
Learn more in the [Perplexity documentation](https://docs.perplexity.ai).
|
|
6
6
|
|
|
@@ -33,6 +33,7 @@ for await (const chunk of stream) {
|
|
|
33
33
|
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
34
34
|
| -------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
35
35
|
| `perplexity/sonar` | 128K | | | | | | $1 | $1 |
|
|
36
|
+
| `perplexity/sonar-deep-research` | 128K | | | | | | $2 | $8 |
|
|
36
37
|
| `perplexity/sonar-pro` | 200K | | | | | | $3 | $15 |
|
|
37
38
|
| `perplexity/sonar-reasoning-pro` | 128K | | | | | | $2 | $8 |
|
|
38
39
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Poe
|
|
2
2
|
|
|
3
|
-
Access
|
|
3
|
+
Access 114 Poe models through Mastra's model router. Authentication is handled automatically using the `POE_API_KEY` environment variable.
|
|
4
4
|
|
|
5
5
|
Learn more in the [Poe documentation](https://creator.poe.com/docs/external-applications/openai-compatible-api).
|
|
6
6
|
|
|
@@ -46,6 +46,7 @@ for await (const chunk of stream) {
|
|
|
46
46
|
| `poe/anthropic/claude-sonnet-3.7` | 197K | | | | | | $3 | $13 |
|
|
47
47
|
| `poe/anthropic/claude-sonnet-4` | 983K | | | | | | $3 | $13 |
|
|
48
48
|
| `poe/anthropic/claude-sonnet-4.5` | 983K | | | | | | $3 | $13 |
|
|
49
|
+
| `poe/anthropic/claude-sonnet-4.6` | 983K | | | | | | $3 | $13 |
|
|
49
50
|
| `poe/cerebras/gpt-oss-120b-cs` | — | | | | | | — | — |
|
|
50
51
|
| `poe/cerebras/llama-3.1-8b-cs` | — | | | | | | — | — |
|
|
51
52
|
| `poe/cerebras/llama-3.3-70b-cs` | — | | | | | | — | — |
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
# QiHang
|
|
2
|
+
|
|
3
|
+
Access 9 QiHang models through Mastra's model router. Authentication is handled automatically using the `QIHANG_API_KEY` environment variable.
|
|
4
|
+
|
|
5
|
+
Learn more in the [QiHang documentation](https://www.qhaigc.net/docs).
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
QIHANG_API_KEY=your-api-key
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```typescript
|
|
12
|
+
import { Agent } from "@mastra/core/agent";
|
|
13
|
+
|
|
14
|
+
const agent = new Agent({
|
|
15
|
+
id: "my-agent",
|
|
16
|
+
name: "My Agent",
|
|
17
|
+
instructions: "You are a helpful assistant",
|
|
18
|
+
model: "qihang-ai/claude-haiku-4-5-20251001"
|
|
19
|
+
});
|
|
20
|
+
|
|
21
|
+
// Generate a response
|
|
22
|
+
const response = await agent.generate("Hello!");
|
|
23
|
+
|
|
24
|
+
// Stream a response
|
|
25
|
+
const stream = await agent.stream("Tell me a story");
|
|
26
|
+
for await (const chunk of stream) {
|
|
27
|
+
console.log(chunk);
|
|
28
|
+
}
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
> **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [QiHang documentation](https://www.qhaigc.net/docs) for details.
|
|
32
|
+
|
|
33
|
+
## Models
|
|
34
|
+
|
|
35
|
+
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
36
|
+
| -------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
37
|
+
| `qihang-ai/claude-haiku-4-5-20251001` | 200K | | | | | | $0.14 | $0.71 |
|
|
38
|
+
| `qihang-ai/claude-opus-4-5-20251101` | 200K | | | | | | $0.71 | $4 |
|
|
39
|
+
| `qihang-ai/claude-sonnet-4-5-20250929` | 200K | | | | | | $0.43 | $2 |
|
|
40
|
+
| `qihang-ai/gemini-2.5-flash` | 1.0M | | | | | | $0.09 | $0.71 |
|
|
41
|
+
| `qihang-ai/gemini-3-flash-preview` | 1.0M | | | | | | $0.07 | $0.43 |
|
|
42
|
+
| `qihang-ai/gemini-3-pro-preview` | 1.0M | | | | | | $0.57 | $3 |
|
|
43
|
+
| `qihang-ai/gpt-5-mini` | 200K | | | | | | $0.04 | $0.29 |
|
|
44
|
+
| `qihang-ai/gpt-5.2` | 400K | | | | | | $0.25 | $2 |
|
|
45
|
+
| `qihang-ai/gpt-5.2-codex` | 400K | | | | | | $0.14 | $1 |
|
|
46
|
+
|
|
47
|
+
## Advanced Configuration
|
|
48
|
+
|
|
49
|
+
### Custom Headers
|
|
50
|
+
|
|
51
|
+
```typescript
|
|
52
|
+
const agent = new Agent({
|
|
53
|
+
id: "custom-agent",
|
|
54
|
+
name: "custom-agent",
|
|
55
|
+
model: {
|
|
56
|
+
url: "https://api.qhaigc.net/v1",
|
|
57
|
+
id: "qihang-ai/claude-haiku-4-5-20251001",
|
|
58
|
+
apiKey: process.env.QIHANG_API_KEY,
|
|
59
|
+
headers: {
|
|
60
|
+
"X-Custom-Header": "value"
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
});
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
### Dynamic Model Selection
|
|
67
|
+
|
|
68
|
+
```typescript
|
|
69
|
+
const agent = new Agent({
|
|
70
|
+
id: "dynamic-agent",
|
|
71
|
+
name: "Dynamic Agent",
|
|
72
|
+
model: ({ requestContext }) => {
|
|
73
|
+
const useAdvanced = requestContext.task === "complex";
|
|
74
|
+
return useAdvanced
|
|
75
|
+
? "qihang-ai/gpt-5.2-codex"
|
|
76
|
+
: "qihang-ai/claude-haiku-4-5-20251001";
|
|
77
|
+
}
|
|
78
|
+
});
|
|
79
|
+
```
|
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
# Qiniu
|
|
2
|
+
|
|
3
|
+
Access 76 Qiniu models through Mastra's model router. Authentication is handled automatically using the `Qiniu_API_KEY` environment variable.
|
|
4
|
+
|
|
5
|
+
Learn more in the [Qiniu documentation](https://developer.qiniu.com/aitokenapi).
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
Qiniu_API_KEY=your-api-key
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```typescript
|
|
12
|
+
import { Agent } from "@mastra/core/agent";
|
|
13
|
+
|
|
14
|
+
const agent = new Agent({
|
|
15
|
+
id: "my-agent",
|
|
16
|
+
name: "My Agent",
|
|
17
|
+
instructions: "You are a helpful assistant",
|
|
18
|
+
model: "qiniu-ai/MiniMax-M1"
|
|
19
|
+
});
|
|
20
|
+
|
|
21
|
+
// Generate a response
|
|
22
|
+
const response = await agent.generate("Hello!");
|
|
23
|
+
|
|
24
|
+
// Stream a response
|
|
25
|
+
const stream = await agent.stream("Tell me a story");
|
|
26
|
+
for await (const chunk of stream) {
|
|
27
|
+
console.log(chunk);
|
|
28
|
+
}
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
> **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [Qiniu documentation](https://developer.qiniu.com/aitokenapi) for details.
|
|
32
|
+
|
|
33
|
+
## Models
|
|
34
|
+
|
|
35
|
+
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
36
|
+
| --------------------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
37
|
+
| `qiniu-ai/claude-3.5-haiku` | 200K | | | | | | — | — |
|
|
38
|
+
| `qiniu-ai/claude-3.5-sonnet` | 200K | | | | | | — | — |
|
|
39
|
+
| `qiniu-ai/claude-3.7-sonnet` | 200K | | | | | | — | — |
|
|
40
|
+
| `qiniu-ai/claude-4.0-opus` | 200K | | | | | | — | — |
|
|
41
|
+
| `qiniu-ai/claude-4.0-sonnet` | 200K | | | | | | — | — |
|
|
42
|
+
| `qiniu-ai/claude-4.1-opus` | 200K | | | | | | — | — |
|
|
43
|
+
| `qiniu-ai/claude-4.5-haiku` | 200K | | | | | | — | — |
|
|
44
|
+
| `qiniu-ai/claude-4.5-opus` | 200K | | | | | | — | — |
|
|
45
|
+
| `qiniu-ai/claude-4.5-sonnet` | 200K | | | | | | — | — |
|
|
46
|
+
| `qiniu-ai/deepseek-r1` | 128K | | | | | | — | — |
|
|
47
|
+
| `qiniu-ai/deepseek-r1-0528` | 128K | | | | | | — | — |
|
|
48
|
+
| `qiniu-ai/deepseek-v3` | 128K | | | | | | — | — |
|
|
49
|
+
| `qiniu-ai/deepseek-v3-0324` | 128K | | | | | | — | — |
|
|
50
|
+
| `qiniu-ai/deepseek-v3.1` | 128K | | | | | | — | — |
|
|
51
|
+
| `qiniu-ai/deepseek/deepseek-math-v2` | 160K | | | | | | — | — |
|
|
52
|
+
| `qiniu-ai/deepseek/deepseek-v3.1-terminus` | 128K | | | | | | — | — |
|
|
53
|
+
| `qiniu-ai/deepseek/deepseek-v3.1-terminus-thinking` | 128K | | | | | | — | — |
|
|
54
|
+
| `qiniu-ai/deepseek/deepseek-v3.2-251201` | 128K | | | | | | — | — |
|
|
55
|
+
| `qiniu-ai/deepseek/deepseek-v3.2-exp` | 128K | | | | | | — | — |
|
|
56
|
+
| `qiniu-ai/deepseek/deepseek-v3.2-exp-thinking` | 128K | | | | | | — | — |
|
|
57
|
+
| `qiniu-ai/doubao-1.5-pro-32k` | 128K | | | | | | — | — |
|
|
58
|
+
| `qiniu-ai/doubao-1.5-thinking-pro` | 128K | | | | | | — | — |
|
|
59
|
+
| `qiniu-ai/doubao-1.5-vision-pro` | 128K | | | | | | — | — |
|
|
60
|
+
| `qiniu-ai/doubao-seed-1.6` | 256K | | | | | | — | — |
|
|
61
|
+
| `qiniu-ai/doubao-seed-1.6-flash` | 256K | | | | | | — | — |
|
|
62
|
+
| `qiniu-ai/doubao-seed-1.6-thinking` | 256K | | | | | | — | — |
|
|
63
|
+
| `qiniu-ai/gemini-2.0-flash` | 1.0M | | | | | | — | — |
|
|
64
|
+
| `qiniu-ai/gemini-2.0-flash-lite` | 1.0M | | | | | | — | — |
|
|
65
|
+
| `qiniu-ai/gemini-2.5-flash` | 1.0M | | | | | | — | — |
|
|
66
|
+
| `qiniu-ai/gemini-2.5-flash-image` | 33K | | | | | | — | — |
|
|
67
|
+
| `qiniu-ai/gemini-2.5-flash-lite` | 1.0M | | | | | | — | — |
|
|
68
|
+
| `qiniu-ai/gemini-2.5-pro` | 1.0M | | | | | | — | — |
|
|
69
|
+
| `qiniu-ai/gemini-3.0-flash-preview` | 1.0M | | | | | | — | — |
|
|
70
|
+
| `qiniu-ai/gemini-3.0-pro-image-preview` | 33K | | | | | | — | — |
|
|
71
|
+
| `qiniu-ai/gemini-3.0-pro-preview` | 1.0M | | | | | | — | — |
|
|
72
|
+
| `qiniu-ai/glm-4.5` | 131K | | | | | | — | — |
|
|
73
|
+
| `qiniu-ai/glm-4.5-air` | 131K | | | | | | — | — |
|
|
74
|
+
| `qiniu-ai/gpt-oss-120b` | 128K | | | | | | — | — |
|
|
75
|
+
| `qiniu-ai/gpt-oss-20b` | 128K | | | | | | — | — |
|
|
76
|
+
| `qiniu-ai/kimi-k2` | 128K | | | | | | — | — |
|
|
77
|
+
| `qiniu-ai/kling-v2-6` | 100.0M | | | | | | — | — |
|
|
78
|
+
| `qiniu-ai/meituan/longcat-flash-chat` | 131K | | | | | | — | — |
|
|
79
|
+
| `qiniu-ai/mimo-v2-flash` | 256K | | | | | | — | — |
|
|
80
|
+
| `qiniu-ai/MiniMax-M1` | 1.0M | | | | | | — | — |
|
|
81
|
+
| `qiniu-ai/minimax/minimax-m2` | 200K | | | | | | — | — |
|
|
82
|
+
| `qiniu-ai/minimax/minimax-m2.1` | 205K | | | | | | — | — |
|
|
83
|
+
| `qiniu-ai/moonshotai/kimi-k2-0905` | 256K | | | | | | — | — |
|
|
84
|
+
| `qiniu-ai/moonshotai/kimi-k2-thinking` | 256K | | | | | | — | — |
|
|
85
|
+
| `qiniu-ai/openai/gpt-5` | 400K | | | | | | — | — |
|
|
86
|
+
| `qiniu-ai/openai/gpt-5.2` | 400K | | | | | | — | — |
|
|
87
|
+
| `qiniu-ai/qwen-max-2025-01-25` | 128K | | | | | | — | — |
|
|
88
|
+
| `qiniu-ai/qwen-turbo` | 1.0M | | | | | | — | — |
|
|
89
|
+
| `qiniu-ai/qwen-vl-max-2025-01-25` | 128K | | | | | | — | — |
|
|
90
|
+
| `qiniu-ai/qwen2.5-vl-72b-instruct` | 128K | | | | | | — | — |
|
|
91
|
+
| `qiniu-ai/qwen2.5-vl-7b-instruct` | 128K | | | | | | — | — |
|
|
92
|
+
| `qiniu-ai/qwen3-235b-a22b` | 128K | | | | | | — | — |
|
|
93
|
+
| `qiniu-ai/qwen3-235b-a22b-instruct-2507` | 262K | | | | | | — | — |
|
|
94
|
+
| `qiniu-ai/qwen3-235b-a22b-thinking-2507` | 262K | | | | | | — | — |
|
|
95
|
+
| `qiniu-ai/qwen3-30b-a3b` | 40K | | | | | | — | — |
|
|
96
|
+
| `qiniu-ai/qwen3-32b` | 40K | | | | | | — | — |
|
|
97
|
+
| `qiniu-ai/qwen3-coder-480b-a35b-instruct` | 262K | | | | | | — | — |
|
|
98
|
+
| `qiniu-ai/qwen3-max` | 262K | | | | | | — | — |
|
|
99
|
+
| `qiniu-ai/qwen3-max-preview` | 256K | | | | | | — | — |
|
|
100
|
+
| `qiniu-ai/qwen3-next-80b-a3b-instruct` | 131K | | | | | | — | — |
|
|
101
|
+
| `qiniu-ai/qwen3-next-80b-a3b-thinking` | 131K | | | | | | — | — |
|
|
102
|
+
| `qiniu-ai/stepfun-ai/gelab-zero-4b-preview` | 8K | | | | | | — | — |
|
|
103
|
+
| `qiniu-ai/x-ai/grok-4-fast` | 2.0M | | | | | | — | — |
|
|
104
|
+
| `qiniu-ai/x-ai/grok-4-fast-non-reasoning` | 2.0M | | | | | | — | — |
|
|
105
|
+
| `qiniu-ai/x-ai/grok-4-fast-reasoning` | 2.0M | | | | | | — | — |
|
|
106
|
+
| `qiniu-ai/x-ai/grok-4.1-fast` | 2.0M | | | | | | — | — |
|
|
107
|
+
| `qiniu-ai/x-ai/grok-4.1-fast-non-reasoning` | 2.0M | | | | | | — | — |
|
|
108
|
+
| `qiniu-ai/x-ai/grok-4.1-fast-reasoning` | 20.0M | | | | | | — | — |
|
|
109
|
+
| `qiniu-ai/x-ai/grok-code-fast-1` | 256K | | | | | | — | — |
|
|
110
|
+
| `qiniu-ai/z-ai/autoglm-phone-9b` | 13K | | | | | | — | — |
|
|
111
|
+
| `qiniu-ai/z-ai/glm-4.6` | 200K | | | | | | — | — |
|
|
112
|
+
| `qiniu-ai/z-ai/glm-4.7` | 200K | | | | | | — | — |
|
|
113
|
+
|
|
114
|
+
## Advanced Configuration
|
|
115
|
+
|
|
116
|
+
### Custom Headers
|
|
117
|
+
|
|
118
|
+
```typescript
|
|
119
|
+
const agent = new Agent({
|
|
120
|
+
id: "custom-agent",
|
|
121
|
+
name: "custom-agent",
|
|
122
|
+
model: {
|
|
123
|
+
    url: "https://api.qnaigc.com/v1",
|
|
124
|
+
id: "qiniu-ai/MiniMax-M1",
|
|
125
|
+
apiKey: process.env.Qiniu_API_KEY,
|
|
126
|
+
headers: {
|
|
127
|
+
"X-Custom-Header": "value"
|
|
128
|
+
}
|
|
129
|
+
}
|
|
130
|
+
});
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
### Dynamic Model Selection
|
|
134
|
+
|
|
135
|
+
```typescript
|
|
136
|
+
const agent = new Agent({
|
|
137
|
+
id: "dynamic-agent",
|
|
138
|
+
name: "Dynamic Agent",
|
|
139
|
+
model: ({ requestContext }) => {
|
|
140
|
+
const useAdvanced = requestContext.task === "complex";
|
|
141
|
+
return useAdvanced
|
|
142
|
+
? "qiniu-ai/z-ai/glm-4.7"
|
|
143
|
+
: "qiniu-ai/MiniMax-M1";
|
|
144
|
+
}
|
|
145
|
+
});
|
|
146
|
+
```
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# SiliconFlow (China)
|
|
2
2
|
|
|
3
|
-
Access
|
|
3
|
+
Access 72 SiliconFlow (China) models through Mastra's model router. Authentication is handled automatically using the `SILICONFLOW_CN_API_KEY` environment variable.
|
|
4
4
|
|
|
5
5
|
Learn more in the [SiliconFlow (China) documentation](https://cloud.siliconflow.com/models).
|
|
6
6
|
|
|
@@ -37,6 +37,7 @@ for await (const chunk of stream) {
|
|
|
37
37
|
| `siliconflow-cn/ascend-tribe/pangu-pro-moe` | 128K | | | | | | $0.20 | $0.60 |
|
|
38
38
|
| `siliconflow-cn/baidu/ERNIE-4.5-300B-A47B` | 131K | | | | | | $0.28 | $1 |
|
|
39
39
|
| `siliconflow-cn/ByteDance-Seed/Seed-OSS-36B-Instruct` | 262K | | | | | | $0.21 | $0.57 |
|
|
40
|
+
| `siliconflow-cn/deepseek-ai/DeepSeek-OCR` | 8K | | | | | | — | — |
|
|
40
41
|
| `siliconflow-cn/deepseek-ai/DeepSeek-R1` | 164K | | | | | | $0.50 | $2 |
|
|
41
42
|
| `siliconflow-cn/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B` | 131K | | | | | | $0.10 | $0.10 |
|
|
42
43
|
| `siliconflow-cn/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B` | 131K | | | | | | $0.18 | $0.18 |
|
|
@@ -50,11 +51,14 @@ for await (const chunk of stream) {
|
|
|
50
51
|
| `siliconflow-cn/Kwaipilot/KAT-Dev` | 128K | | | | | | $0.20 | $0.60 |
|
|
51
52
|
| `siliconflow-cn/moonshotai/Kimi-K2-Instruct-0905` | 262K | | | | | | $0.40 | $2 |
|
|
52
53
|
| `siliconflow-cn/moonshotai/Kimi-K2-Thinking` | 262K | | | | | | $0.55 | $3 |
|
|
54
|
+
| `siliconflow-cn/PaddlePaddle/PaddleOCR-VL` | 16K | | | | | | — | — |
|
|
55
|
+
| `siliconflow-cn/PaddlePaddle/PaddleOCR-VL-1.5` | 16K | | | | | | — | — |
|
|
53
56
|
| `siliconflow-cn/Pro/deepseek-ai/DeepSeek-R1` | 164K | | | | | | $0.50 | $2 |
|
|
54
57
|
| `siliconflow-cn/Pro/deepseek-ai/DeepSeek-V3` | 164K | | | | | | $0.25 | $1 |
|
|
55
58
|
| `siliconflow-cn/Pro/deepseek-ai/DeepSeek-V3.1-Terminus` | 164K | | | | | | $0.27 | $1 |
|
|
56
59
|
| `siliconflow-cn/Pro/deepseek-ai/DeepSeek-V3.2` | 164K | | | | | | $0.27 | $0.42 |
|
|
57
60
|
| `siliconflow-cn/Pro/MiniMaxAI/MiniMax-M2.1` | 197K | | | | | | $0.30 | $1 |
|
|
61
|
+
| `siliconflow-cn/Pro/MiniMaxAI/MiniMax-M2.5` | 192K | | | | | | $0.30 | $1 |
|
|
58
62
|
| `siliconflow-cn/Pro/moonshotai/Kimi-K2-Instruct-0905` | 262K | | | | | | $0.40 | $2 |
|
|
59
63
|
| `siliconflow-cn/Pro/moonshotai/Kimi-K2-Thinking` | 262K | | | | | | $0.55 | $3 |
|
|
60
64
|
| `siliconflow-cn/Pro/moonshotai/Kimi-K2.5` | 262K | | | | | | $0.55 | $3 |
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Together AI
|
|
2
2
|
|
|
3
|
-
Access
|
|
3
|
+
Access 19 Together AI models through Mastra's model router. Authentication is handled automatically using the `TOGETHER_API_KEY` environment variable.
|
|
4
4
|
|
|
5
5
|
Learn more in the [Together AI documentation](https://docs.together.ai/docs/serverless-models).
|
|
6
6
|
|
|
@@ -47,6 +47,7 @@ for await (const chunk of stream) {
|
|
|
47
47
|
| `togetherai/Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8` | 262K | | | | | | $2 | $2 |
|
|
48
48
|
| `togetherai/Qwen/Qwen3-Coder-Next-FP8` | 262K | | | | | | $0.50 | $1 |
|
|
49
49
|
| `togetherai/Qwen/Qwen3-Next-80B-A3B-Instruct` | 262K | | | | | | $0.15 | $2 |
|
|
50
|
+
| `togetherai/Qwen/Qwen3.5-397B-A17B` | 262K | | | | | | $0.60 | $4 |
|
|
50
51
|
| `togetherai/zai-org/GLM-4.6` | 200K | | | | | | $0.60 | $2 |
|
|
51
52
|
| `togetherai/zai-org/GLM-4.7` | 200K | | | | | | $0.45 | $2 |
|
|
52
53
|
| `togetherai/zai-org/GLM-5` | 203K | | | | | | $1 | $3 |
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# ZenMux
|
|
2
2
|
|
|
3
|
-
Access
|
|
3
|
+
Access 67 ZenMux models through Mastra's model router. Authentication is handled automatically using the `ZENMUX_API_KEY` environment variable.
|
|
4
4
|
|
|
5
5
|
Learn more in the [ZenMux documentation](https://docs.zenmux.ai).
|
|
6
6
|
|
|
@@ -44,6 +44,7 @@ for await (const chunk of stream) {
|
|
|
44
44
|
| `zenmux/anthropic/claude-opus-4.6` | 1.0M | | | | | | $5 | $25 |
|
|
45
45
|
| `zenmux/anthropic/claude-sonnet-4` | 1.0M | | | | | | $3 | $15 |
|
|
46
46
|
| `zenmux/anthropic/claude-sonnet-4.5` | 1.0M | | | | | | $3 | $15 |
|
|
47
|
+
| `zenmux/anthropic/claude-sonnet-4.6` | 1.0M | | | | | | $3 | $15 |
|
|
47
48
|
| `zenmux/baidu/ernie-5.0-thinking-preview` | 128K | | | | | | $0.84 | $3 |
|
|
48
49
|
| `zenmux/deepseek/deepseek-chat` | 128K | | | | | | $0.28 | $0.42 |
|
|
49
50
|
| `zenmux/deepseek/deepseek-v3.2` | 128K | | | | | | $0.28 | $0.43 |
|
|
@@ -79,6 +80,9 @@ for await (const chunk of stream) {
|
|
|
79
80
|
| `zenmux/stepfun/step-3.5-flash` | 256K | | | | | | $0.10 | $0.30 |
|
|
80
81
|
| `zenmux/stepfun/step-3.5-flash-free` | 256K | | | | | | — | — |
|
|
81
82
|
| `zenmux/volcengine/doubao-seed-1.8` | 256K | | | | | | $0.11 | $0.28 |
|
|
83
|
+
| `zenmux/volcengine/doubao-seed-2.0-lite` | 256K | | | | | | $0.09 | $0.51 |
|
|
84
|
+
| `zenmux/volcengine/doubao-seed-2.0-mini` | 256K | | | | | | $0.03 | $0.28 |
|
|
85
|
+
| `zenmux/volcengine/doubao-seed-2.0-pro` | 256K | | | | | | $0.45 | $2 |
|
|
82
86
|
| `zenmux/volcengine/doubao-seed-code` | 256K | | | | | | $0.17 | $1 |
|
|
83
87
|
| `zenmux/x-ai/grok-4` | 256K | | | | | | $3 | $15 |
|
|
84
88
|
| `zenmux/x-ai/grok-4-fast` | 2.0M | | | | | | $0.20 | $0.50 |
|