@mastra/mcp-docs-server 1.1.22-alpha.1 → 1.1.22-alpha.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,163 @@
1
+ # Arthur exporter
2
+
3
+ [Arthur](https://arthur.ai/) provides an observability and evaluation platform for AI applications through [Arthur Engine](https://github.com/arthur-ai/arthur-engine) (open-source). The Arthur exporter sends traces using OpenTelemetry and [OpenInference](https://github.com/Arize-ai/openinference/tree/main/spec) semantic conventions.
4
+
5
+ ## Installation
6
+
7
+ **npm**:
8
+
9
+ ```bash
10
+ npm install @mastra/arthur@latest
11
+ ```
12
+
13
+ **pnpm**:
14
+
15
+ ```bash
16
+ pnpm add @mastra/arthur@latest
17
+ ```
18
+
19
+ **Yarn**:
20
+
21
+ ```bash
22
+ yarn add @mastra/arthur@latest
23
+ ```
24
+
25
+ **Bun**:
26
+
27
+ ```bash
28
+ bun add @mastra/arthur@latest
29
+ ```
30
+
31
+ ## Configuration
32
+
33
+ ### Prerequisites
34
+
35
+ 1. **Arthur Engine instance**: Follow the [Docker Compose deployment guide](https://docs.arthur.ai/docs/arthur-genai-engine-docker-compose-deployment-guide) to start an Arthur Engine instance
36
+ 2. **API key**: [Generate an API key](https://docs.arthur.ai/docs/api-keys-management) from the Arthur Engine UI at `http://localhost:3030`
37
+ 3. **Task ID** (optional): [Create a task](https://docs.arthur.ai/docs/create-a-task) to route traces to a specific task
38
+
39
+ ### Task routing
40
+
41
+ Arthur Engine associates traces with tasks in two ways:
42
+
43
+ - **By service name**: Set `serviceName` in the observability config. Arthur Engine automatically routes traces to the task matching that name, creating it if needed.
44
+ - **By task ID**: Pass a pre-existing `taskId` to the exporter to send traces to a specific task directly.
45
+
46
+ If both are provided, `taskId` takes precedence.
47
+
48
+ ### Environment variables
49
+
50
+ ```bash
51
+ # Required
52
+ ARTHUR_API_KEY=your-api-key
53
+ ARTHUR_BASE_URL=http://localhost:3030
54
+
55
+ # Optional - route traces to a pre-existing task by ID
56
+ ARTHUR_TASK_ID=your-task-id
57
+ ```
58
+
59
+ ### Zero-Config Setup
60
+
61
+ With environment variables set, use the exporter with no configuration:
62
+
63
+ ```typescript
64
+ import { Mastra } from '@mastra/core'
65
+ import { Observability } from '@mastra/observability'
66
+ import { ArthurExporter } from '@mastra/arthur'
67
+
68
+ export const mastra = new Mastra({
69
+ observability: new Observability({
70
+ configs: {
71
+ arthur: {
72
+ serviceName: 'my-service',
73
+ exporters: [new ArthurExporter()],
74
+ },
75
+ },
76
+ }),
77
+ })
78
+ ```
79
+
80
+ ### Explicit Configuration
81
+
82
+ You can also pass credentials directly (takes precedence over environment variables):
83
+
84
+ ```typescript
85
+ import { Mastra } from '@mastra/core'
86
+ import { Observability } from '@mastra/observability'
87
+ import { ArthurExporter } from '@mastra/arthur'
88
+
89
+ export const mastra = new Mastra({
90
+ observability: new Observability({
91
+ configs: {
92
+ arthur: {
93
+ serviceName: 'my-service',
94
+ exporters: [
95
+ new ArthurExporter({
96
+ apiKey: process.env.ARTHUR_API_KEY!,
97
+ endpoint: process.env.ARTHUR_BASE_URL!,
98
+ taskId: process.env.ARTHUR_TASK_ID,
99
+ }),
100
+ ],
101
+ },
102
+ },
103
+ }),
104
+ })
105
+ ```
106
+
107
+ ## Configuration options
108
+
109
+ ### Complete Configuration
110
+
111
+ ```typescript
112
+ new ArthurExporter({
113
+ // Arthur Configuration
114
+ apiKey: 'your-api-key', // Required
115
+ endpoint: 'http://localhost:3030', // Required
116
+ taskId: 'your-task-id', // Optional
117
+
118
+ // Optional OTLP settings
119
+ headers: {
120
+ 'x-custom-header': 'value', // Additional headers for OTLP requests
121
+ },
122
+
123
+ // Debug and performance tuning
124
+ logLevel: 'debug', // Logging: debug | info | warn | error
125
+ batchSize: 512, // Number of spans per batch (default: 512)
126
+ timeout: 30000, // Export timeout in milliseconds (default: 30000)
127
+
128
+ // Custom resource attributes
129
+ resourceAttributes: {
130
+ 'deployment.environment': process.env.NODE_ENV,
131
+ 'service.version': process.env.APP_VERSION,
132
+ },
133
+ })
134
+ ```
135
+
136
+ ### Custom metadata
137
+
138
+ Non-reserved span attributes are serialized into the OpenInference `metadata` payload and surface in Arthur. You can add them via `tracingOptions.metadata`:
139
+
140
+ ```ts
141
+ await agent.generate(input, {
142
+ tracingOptions: {
143
+ metadata: {
144
+ companyId: 'acme-co',
145
+ tier: 'enterprise',
146
+ },
147
+ },
148
+ })
149
+ ```
150
+
151
+ Reserved fields such as `input`, `output`, `sessionId`, thread/user IDs, and OpenInference IDs are excluded automatically.
152
+
153
+ ## OpenInference semantic conventions
154
+
155
+ This exporter implements the [OpenInference Semantic Conventions](https://github.com/Arize-ai/openinference/tree/main/spec) for generative AI applications, providing standardized trace structure across different observability platforms.
156
+
157
+ ## Related
158
+
159
+ - [Tracing Overview](https://mastra.ai/docs/observability/tracing/overview)
160
+ - [ArthurExporter reference](https://mastra.ai/reference/observability/tracing/exporters/arthur)
161
+ - [Arthur Engine documentation](https://docs.arthur.ai/)
162
+ - [Arthur Engine repository](https://github.com/arthur-ai/arthur-engine)
163
+ - [OpenInference Specification](https://github.com/Arize-ai/openinference/tree/main/spec)
@@ -82,9 +82,8 @@ export const mastra = new Mastra({
82
82
  publicKey: process.env.LANGFUSE_PUBLIC_KEY!,
83
83
  secretKey: process.env.LANGFUSE_SECRET_KEY!,
84
84
  baseUrl: process.env.LANGFUSE_BASE_URL,
85
- options: {
86
- environment: process.env.NODE_ENV,
87
- },
85
+ environment: process.env.NODE_ENV,
86
+ release: process.env.GIT_COMMIT,
88
87
  }),
89
88
  ],
90
89
  },
@@ -136,12 +135,9 @@ new LangfuseExporter({
136
135
  realtime: process.env.NODE_ENV === 'development', // Dynamic mode selection
137
136
  logLevel: 'info', // Diagnostic logging: debug | info | warn | error
138
137
 
139
- // Langfuse-specific options
140
- options: {
141
- environment: process.env.NODE_ENV, // Shows in UI for filtering
142
- version: process.env.APP_VERSION, // Track different versions
143
- release: process.env.GIT_COMMIT, // Git commit hash
144
- },
138
+ // Langfuse-specific settings
139
+ environment: process.env.NODE_ENV, // Shows in Langfuse UI for filtering
140
+ release: process.env.GIT_COMMIT, // Git commit hash for version tracking
145
141
  })
146
142
  ```
147
143
 
@@ -156,26 +152,26 @@ Use `withLangfusePrompt` with `buildTracingOptions` for the cleanest API:
156
152
  ```typescript
157
153
  import { Agent } from '@mastra/core/agent'
158
154
  import { buildTracingOptions } from '@mastra/observability'
159
- import { withLangfusePrompt } from '@mastra/langfuse'
160
- import { Langfuse } from 'langfuse'
155
+ import { LangfuseExporter, withLangfusePrompt } from '@mastra/langfuse'
161
156
 
162
- // Reads credentials from LANGFUSE_SECRET_KEY, LANGFUSE_PUBLIC_KEY, LANGFUSE_BASE_URL env vars
163
- const langfuse = new Langfuse()
157
+ const exporter = new LangfuseExporter()
164
158
 
165
- // Fetch the prompt from Langfuse Prompt Management
166
- const prompt = await langfuse.getPrompt('customer-support')
159
+ // Fetch the prompt from Langfuse Prompt Management via the client
160
+ const prompt = await exporter.client.prompt.get('customer-support', { type: 'text' })
167
161
 
168
162
  export const supportAgent = new Agent({
169
163
  name: 'support-agent',
170
- instructions: prompt.prompt, // Use the prompt text from Langfuse
164
+ instructions: prompt.compile(), // Use the prompt text from Langfuse
171
165
  model: 'openai/gpt-5.4',
172
166
  defaultGenerateOptions: {
173
- tracingOptions: buildTracingOptions(withLangfusePrompt(prompt)),
167
+ tracingOptions: buildTracingOptions(
168
+ withLangfusePrompt({ name: prompt.name, version: prompt.version }),
169
+ ),
174
170
  },
175
171
  })
176
172
  ```
177
173
 
178
- The `withLangfusePrompt` helper automatically extracts `name`, `version`, and `id` from the Langfuse prompt object.
174
+ The `withLangfusePrompt` helper accepts `name` and `version` fields for prompt linking. Langfuse v5 requires both fields.
179
175
 
180
176
  ### Manual Fields
181
177
 
@@ -183,26 +179,16 @@ You can also pass manual fields if you're not using the Langfuse SDK:
183
179
 
184
180
  ```typescript
185
181
  const tracingOptions = buildTracingOptions(withLangfusePrompt({ name: 'my-prompt', version: 1 }))
186
-
187
- // Or with just an ID
188
- const tracingOptions = buildTracingOptions(withLangfusePrompt({ id: 'prompt-uuid-12345' }))
189
182
  ```
190
183
 
191
184
  ### Prompt Object Fields
192
185
 
193
- The prompt object supports these fields:
194
-
195
- | Field | Type | Description |
196
- | --------- | ------ | ---------------------------------- |
197
- | `name` | string | The prompt name in Langfuse |
198
- | `version` | number | The prompt version number |
199
- | `id` | string | The prompt UUID for direct linking |
200
-
201
- You can link prompts using either:
186
+ The prompt object requires both `name` and `version`:
202
187
 
203
- - `id` alone (the UUID uniquely identifies a prompt version)
204
- - `name` + `version` together
205
- - All three fields
188
+ | Field | Type | Description |
189
+ | --------- | ------ | --------------------------- |
190
+ | `name` | string | The prompt name in Langfuse |
191
+ | `version` | number | The prompt version number |
206
192
 
207
193
  When set on a `MODEL_GENERATION` span, the Langfuse exporter automatically links the generation to the corresponding prompt.
208
194
 
@@ -1,6 +1,6 @@
1
1
  # ![OpenRouter logo](https://models.dev/logos/openrouter.svg)OpenRouter
2
2
 
3
- OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 168 models through Mastra's model router.
3
+ OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 170 models through Mastra's model router.
4
4
 
5
5
  Learn more in the [OpenRouter documentation](https://openrouter.ai/models).
6
6
 
@@ -45,6 +45,7 @@ ANTHROPIC_API_KEY=ant-...
45
45
  | `anthropic/claude-sonnet-4.5` |
46
46
  | `anthropic/claude-sonnet-4.6` |
47
47
  | `arcee-ai/trinity-large-preview:free` |
48
+ | `arcee-ai/trinity-large-thinking` |
48
49
  | `arcee-ai/trinity-mini:free` |
49
50
  | `black-forest-labs/flux.2-flex` |
50
51
  | `black-forest-labs/flux.2-klein-4b` |
@@ -82,6 +83,8 @@ ANTHROPIC_API_KEY=ant-...
82
83
  | `google/gemma-3n-e2b-it:free` |
83
84
  | `google/gemma-3n-e4b-it` |
84
85
  | `google/gemma-3n-e4b-it:free` |
86
+ | `google/gemma-4-26b-a4b-it` |
87
+ | `google/gemma-4-31b-it` |
85
88
  | `inception/mercury` |
86
89
  | `inception/mercury-2` |
87
90
  | `inception/mercury-coder` |
@@ -172,7 +175,6 @@ ANTHROPIC_API_KEY=ant-...
172
175
  | `qwen/qwen3-next-80b-a3b-thinking` |
173
176
  | `qwen/qwen3.5-397b-a17b` |
174
177
  | `qwen/qwen3.5-plus-02-15` |
175
- | `qwen/qwen3.6-plus-preview:free` |
176
178
  | `qwen/qwen3.6-plus:free` |
177
179
  | `sourceful/riverflow-v2-fast-preview` |
178
180
  | `sourceful/riverflow-v2-max-preview` |
@@ -1,6 +1,6 @@
1
1
  # Model Providers
2
2
 
3
- Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3601 models from 98 providers through a single API.
3
+ Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3605 models from 98 providers through a single API.
4
4
 
5
5
  ## Features
6
6
 
@@ -101,7 +101,7 @@ for await (const chunk of stream) {
101
101
  | `alibaba-cn/qwen3.5-397b-a17b` | 262K | | | | | | $0.43 | $3 |
102
102
  | `alibaba-cn/qwen3.5-flash` | 1.0M | | | | | | $0.17 | $2 |
103
103
  | `alibaba-cn/qwen3.5-plus` | 1.0M | | | | | | $0.57 | $3 |
104
- | `alibaba-cn/qwen3.6-plus` | 1.0M | | | | | | $1 | $7 |
104
+ | `alibaba-cn/qwen3.6-plus` | 1.0M | | | | | | $0.28 | $2 |
105
105
  | `alibaba-cn/qwq-32b` | 131K | | | | | | $0.29 | $0.86 |
106
106
  | `alibaba-cn/qwq-plus` | 131K | | | | | | $0.23 | $0.57 |
107
107
  | `alibaba-cn/siliconflow/deepseek-r1-0528` | 164K | | | | | | $0.50 | $2 |
@@ -1,6 +1,6 @@
1
1
  # ![Alibaba Coding Plan (China) logo](https://models.dev/logos/alibaba-coding-plan-cn.svg)Alibaba Coding Plan (China)
2
2
 
3
- Access 8 Alibaba Coding Plan (China) models through Mastra's model router. Authentication is handled automatically using the `ALIBABA_CODING_PLAN_API_KEY` environment variable.
3
+ Access 9 Alibaba Coding Plan (China) models through Mastra's model router. Authentication is handled automatically using the `ALIBABA_CODING_PLAN_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Alibaba Coding Plan (China) documentation](https://help.aliyun.com/zh/model-studio/coding-plan).
6
6
 
@@ -42,6 +42,7 @@ for await (const chunk of stream) {
42
42
  | `alibaba-coding-plan-cn/qwen3-coder-plus` | 1.0M | | | | | | — | — |
43
43
  | `alibaba-coding-plan-cn/qwen3-max-2026-01-23` | 262K | | | | | | — | — |
44
44
  | `alibaba-coding-plan-cn/qwen3.5-plus` | 1.0M | | | | | | — | — |
45
+ | `alibaba-coding-plan-cn/qwen3.6-plus` | 1.0M | | | | | | — | — |
45
46
 
46
47
  ## Advanced configuration
47
48
 
@@ -71,7 +72,7 @@ const agent = new Agent({
71
72
  model: ({ requestContext }) => {
72
73
  const useAdvanced = requestContext.task === "complex";
73
74
  return useAdvanced
74
- ? "alibaba-coding-plan-cn/qwen3.5-plus"
75
+ ? "alibaba-coding-plan-cn/qwen3.6-plus"
75
76
  : "alibaba-coding-plan-cn/MiniMax-M2.5";
76
77
  }
77
78
  });
@@ -1,6 +1,6 @@
1
1
  # ![Alibaba Coding Plan logo](https://models.dev/logos/alibaba-coding-plan.svg)Alibaba Coding Plan
2
2
 
3
- Access 8 Alibaba Coding Plan models through Mastra's model router. Authentication is handled automatically using the `ALIBABA_CODING_PLAN_API_KEY` environment variable.
3
+ Access 9 Alibaba Coding Plan models through Mastra's model router. Authentication is handled automatically using the `ALIBABA_CODING_PLAN_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Alibaba Coding Plan documentation](https://www.alibabacloud.com/help/en/model-studio/coding-plan).
6
6
 
@@ -42,6 +42,7 @@ for await (const chunk of stream) {
42
42
  | `alibaba-coding-plan/qwen3-coder-plus` | 1.0M | | | | | | — | — |
43
43
  | `alibaba-coding-plan/qwen3-max-2026-01-23` | 262K | | | | | | — | — |
44
44
  | `alibaba-coding-plan/qwen3.5-plus` | 1.0M | | | | | | — | — |
45
+ | `alibaba-coding-plan/qwen3.6-plus` | 1.0M | | | | | | — | — |
45
46
 
46
47
  ## Advanced configuration
47
48
 
@@ -71,7 +72,7 @@ const agent = new Agent({
71
72
  model: ({ requestContext }) => {
72
73
  const useAdvanced = requestContext.task === "complex";
73
74
  return useAdvanced
74
- ? "alibaba-coding-plan/qwen3.5-plus"
75
+ ? "alibaba-coding-plan/qwen3.6-plus"
75
76
  : "alibaba-coding-plan/MiniMax-M2.5";
76
77
  }
77
78
  });
@@ -1,6 +1,6 @@
1
1
  # ![Alibaba logo](https://models.dev/logos/alibaba.svg)Alibaba
2
2
 
3
- Access 41 Alibaba models through Mastra's model router. Authentication is handled automatically using the `DASHSCOPE_API_KEY` environment variable.
3
+ Access 42 Alibaba models through Mastra's model router. Authentication is handled automatically using the `DASHSCOPE_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Alibaba documentation](https://www.alibabacloud.com/help/en/model-studio/models).
6
6
 
@@ -74,6 +74,7 @@ for await (const chunk of stream) {
74
74
  | `alibaba/qwen3-vl-plus` | 262K | | | | | | $0.20 | $2 |
75
75
  | `alibaba/qwen3.5-397b-a17b` | 262K | | | | | | $0.60 | $4 |
76
76
  | `alibaba/qwen3.5-plus` | 1.0M | | | | | | $0.40 | $2 |
77
+ | `alibaba/qwen3.6-plus` | 1.0M | | | | | | $0.28 | $2 |
77
78
  | `alibaba/qwq-plus` | 131K | | | | | | $0.80 | $2 |
78
79
 
79
80
  ## Advanced configuration
@@ -32,7 +32,7 @@ for await (const chunk of stream) {
32
32
 
33
33
  | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
34
34
  | ---------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
35
- | `deepseek/deepseek-chat` | 128K | | | | | | $0.28 | $0.42 |
35
+ | `deepseek/deepseek-chat` | 131K | | | | | | $0.28 | $0.42 |
36
36
  | `deepseek/deepseek-reasoner` | 128K | | | | | | $0.28 | $0.42 |
37
37
 
38
38
  ## Advanced configuration
@@ -67,8 +67,8 @@ for await (const chunk of stream) {
67
67
  | `google/gemma-3-4b-it` | 33K | | | | | | — | — |
68
68
  | `google/gemma-3n-e2b-it` | 8K | | | | | | — | — |
69
69
  | `google/gemma-3n-e4b-it` | 8K | | | | | | — | — |
70
- | `google/gemma-4-26b` | 256K | | | | | | — | — |
71
- | `google/gemma-4-31b` | 256K | | | | | | — | — |
70
+ | `google/gemma-4-26b-it` | 256K | | | | | | — | — |
71
+ | `google/gemma-4-31b-it` | 256K | | | | | | — | — |
72
72
 
73
73
  ## Advanced configuration
74
74
 
@@ -97,7 +97,7 @@ const agent = new Agent({
97
97
  model: ({ requestContext }) => {
98
98
  const useAdvanced = requestContext.task === "complex";
99
99
  return useAdvanced
100
- ? "google/gemma-4-31b"
100
+ ? "google/gemma-4-31b-it"
101
101
  : "google/gemini-1.5-flash";
102
102
  }
103
103
  });
@@ -1,6 +1,6 @@
1
1
  # ![Nvidia logo](https://models.dev/logos/nvidia.svg)Nvidia
2
2
 
3
- Access 74 Nvidia models through Mastra's model router. Authentication is handled automatically using the `NVIDIA_API_KEY` environment variable.
3
+ Access 75 Nvidia models through Mastra's model router. Authentication is handled automatically using the `NVIDIA_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Nvidia documentation](https://docs.api.nvidia.com/nim/).
6
6
 
@@ -50,6 +50,7 @@ for await (const chunk of stream) {
50
50
  | `nvidia/google/gemma-3-27b-it` | 131K | | | | | | — | — |
51
51
  | `nvidia/google/gemma-3n-e2b-it` | 128K | | | | | | — | — |
52
52
  | `nvidia/google/gemma-3n-e4b-it` | 128K | | | | | | — | — |
53
+ | `nvidia/google/gemma-4-31b-it` | 256K | | | | | | — | — |
53
54
  | `nvidia/meta/codellama-70b` | 128K | | | | | | — | — |
54
55
  | `nvidia/meta/llama-3.1-405b-instruct` | 128K | | | | | | — | — |
55
56
  | `nvidia/meta/llama-3.1-70b-instruct` | 128K | | | | | | — | — |
@@ -1,6 +1,6 @@
1
1
  # ![Together AI logo](https://models.dev/logos/togetherai.svg)Together AI
2
2
 
3
- Access 17 Together AI models through Mastra's model router. Authentication is handled automatically using the `TOGETHER_API_KEY` environment variable.
3
+ Access 13 Together AI models through Mastra's model router. Authentication is handled automatically using the `TOGETHER_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Together AI documentation](https://docs.together.ai/docs/serverless-models).
6
6
 
@@ -38,16 +38,12 @@ for await (const chunk of stream) {
38
38
  | `togetherai/essentialai/Rnj-1-Instruct` | 33K | | | | | | $0.15 | $0.15 |
39
39
  | `togetherai/meta-llama/Llama-3.3-70B-Instruct-Turbo` | 131K | | | | | | $0.88 | $0.88 |
40
40
  | `togetherai/MiniMaxAI/MiniMax-M2.5` | 205K | | | | | | $0.30 | $1 |
41
- | `togetherai/moonshotai/Kimi-K2-Instruct` | 131K | | | | | | $1 | $3 |
42
41
  | `togetherai/moonshotai/Kimi-K2.5` | 262K | | | | | | $0.50 | $3 |
43
42
  | `togetherai/openai/gpt-oss-120b` | 131K | | | | | | $0.15 | $0.60 |
44
43
  | `togetherai/Qwen/Qwen3-235B-A22B-Instruct-2507-tput` | 262K | | | | | | $0.20 | $0.60 |
45
44
  | `togetherai/Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8` | 262K | | | | | | $2 | $2 |
46
45
  | `togetherai/Qwen/Qwen3-Coder-Next-FP8` | 262K | | | | | | $0.50 | $1 |
47
- | `togetherai/Qwen/Qwen3-Next-80B-A3B-Instruct` | 262K | | | | | | $0.15 | $2 |
48
46
  | `togetherai/Qwen/Qwen3.5-397B-A17B` | 262K | | | | | | $0.60 | $4 |
49
- | `togetherai/zai-org/GLM-4.6` | 200K | | | | | | $0.60 | $2 |
50
- | `togetherai/zai-org/GLM-4.7` | 200K | | | | | | $0.45 | $2 |
51
47
  | `togetherai/zai-org/GLM-5` | 203K | | | | | | $1 | $3 |
52
48
 
53
49
  ## Advanced configuration
@@ -1,6 +1,6 @@
1
1
  # ![Vivgrid logo](https://models.dev/logos/vivgrid.svg)Vivgrid
2
2
 
3
- Access 9 Vivgrid models through Mastra's model router. Authentication is handled automatically using the `VIVGRID_API_KEY` environment variable.
3
+ Access 12 Vivgrid models through Mastra's model router. Authentication is handled automatically using the `VIVGRID_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Vivgrid documentation](https://docs.vivgrid.com/models).
6
6
 
@@ -42,7 +42,10 @@ for await (const chunk of stream) {
42
42
  | `vivgrid/gpt-5.1-codex` | 400K | | | | | | $1 | $10 |
43
43
  | `vivgrid/gpt-5.1-codex-max` | 400K | | | | | | $1 | $10 |
44
44
  | `vivgrid/gpt-5.2-codex` | 400K | | | | | | $2 | $14 |
45
+ | `vivgrid/gpt-5.3-codex` | 400K | | | | | | $2 | $14 |
45
46
  | `vivgrid/gpt-5.4` | 400K | | | | | | $3 | $15 |
47
+ | `vivgrid/gpt-5.4-mini` | 400K | | | | | | $0.75 | $5 |
48
+ | `vivgrid/gpt-5.4-nano` | 400K | | | | | | $0.20 | $1 |
46
49
 
47
50
  ## Advanced configuration
48
51
 
@@ -72,7 +75,7 @@ const agent = new Agent({
72
75
  model: ({ requestContext }) => {
73
76
  const useAdvanced = requestContext.task === "complex";
74
77
  return useAdvanced
75
- ? "vivgrid/gpt-5.4"
78
+ ? "vivgrid/gpt-5.4-nano"
76
79
  : "vivgrid/deepseek-v3.2";
77
80
  }
78
81
  });
@@ -1,6 +1,6 @@
1
1
  # ![Vultr logo](https://models.dev/logos/vultr.svg)Vultr
2
2
 
3
- Access 4 Vultr models through Mastra's model router. Authentication is handled automatically using the `VULTR_API_KEY` environment variable.
3
+ Access 5 Vultr models through Mastra's model router. Authentication is handled automatically using the `VULTR_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Vultr documentation](https://api.vultrinference.com/).
6
6
 
@@ -34,10 +34,11 @@ for await (const chunk of stream) {
34
34
 
35
35
  | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
36
  | --------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
- | `vultr/DeepSeek-V3.2` | 163K | | | | | | $0.55 | $2 |
38
- | `vultr/GLM-5-FP8` | 202K | | | | | | $0.85 | $3 |
39
- | `vultr/Kimi-K2.5` | 261K | | | | | | $0.55 | $3 |
40
- | `vultr/MiniMax-M2.5` | 196K | | | | | | $0.30 | $1 |
37
+ | `vultr/DeepSeek-V3.2` | 127K | | | | | | $0.55 | $2 |
38
+ | `vultr/GLM-5-FP8` | 200K | | | | | | $0.85 | $3 |
39
+ | `vultr/gpt-oss-120b` | 129K | | | | | | $0.15 | $0.60 |
40
+ | `vultr/Kimi-K2.5` | 254K | | | | | | $0.55 | $3 |
41
+ | `vultr/MiniMax-M2.5` | 194K | | | | | | $0.30 | $1 |
41
42
 
42
43
  ## Advanced configuration
43
44
 
@@ -67,7 +68,7 @@ const agent = new Agent({
67
68
  model: ({ requestContext }) => {
68
69
  const useAdvanced = requestContext.task === "complex";
69
70
  return useAdvanced
70
- ? "vultr/MiniMax-M2.5"
71
+ ? "vultr/gpt-oss-120b"
71
72
  : "vultr/DeepSeek-V3.2";
72
73
  }
73
74
  });
@@ -1,6 +1,6 @@
1
1
  # ![ZenMux logo](https://models.dev/logos/zenmux.svg)ZenMux
2
2
 
3
- Access 88 ZenMux models through Mastra's model router. Authentication is handled automatically using the `ZENMUX_API_KEY` environment variable.
3
+ Access 86 ZenMux models through Mastra's model router. Authentication is handled automatically using the `ZENMUX_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [ZenMux documentation](https://docs.zenmux.ai).
6
6
 
@@ -52,13 +52,10 @@ for await (const chunk of stream) {
52
52
  | `zenmux/google/gemini-2.5-flash-lite` | 1.0M | | | | | | $0.10 | $0.40 |
53
53
  | `zenmux/google/gemini-2.5-pro` | 1.0M | | | | | | $1 | $10 |
54
54
  | `zenmux/google/gemini-3-flash-preview` | 1.0M | | | | | | $0.50 | $3 |
55
- | `zenmux/google/gemini-3-pro-preview` | 1.0M | | | | | | $2 | $12 |
56
55
  | `zenmux/google/gemini-3.1-flash-lite-preview` | 1.1M | | | | | | $0.25 | $2 |
57
56
  | `zenmux/google/gemini-3.1-pro-preview` | 1.0M | | | | | | $2 | $12 |
58
57
  | `zenmux/inclusionai/ling-1t` | 128K | | | | | | $0.56 | $2 |
59
58
  | `zenmux/inclusionai/ring-1t` | 128K | | | | | | $0.56 | $2 |
60
- | `zenmux/kuaishou/kat-coder-pro-v1` | 256K | | | | | | $0.30 | $1 |
61
- | `zenmux/kuaishou/kat-coder-pro-v1-free` | 256K | | | | | | — | — |
62
59
  | `zenmux/kuaishou/kat-coder-pro-v2` | 256K | | | | | | $0.30 | $1 |
63
60
  | `zenmux/minimax/minimax-m2` | 204K | | | | | | $0.30 | $1 |
64
61
  | `zenmux/minimax/minimax-m2.1` | 204K | | | | | | $0.30 | $1 |
@@ -90,6 +87,8 @@ for await (const chunk of stream) {
90
87
  | `zenmux/qwen/qwen3.5-flash` | 1.0M | | | | | | $0.10 | $0.40 |
91
88
  | `zenmux/qwen/qwen3.5-plus` | 1.0M | | | | | | $0.80 | $5 |
92
89
  | `zenmux/qwen/qwen3.6-plus` | 1.0M | | | | | | $0.50 | $3 |
90
+ | `zenmux/sapiens-ai/agnes-1.5-lite` | 256K | | | | | | $0.12 | $0.60 |
91
+ | `zenmux/sapiens-ai/agnes-1.5-pro` | 256K | | | | | | $0.16 | $0.80 |
93
92
  | `zenmux/stepfun/step-3` | 66K | | | | | | $0.21 | $0.57 |
94
93
  | `zenmux/stepfun/step-3.5-flash` | 256K | | | | | | $0.10 | $0.30 |
95
94
  | `zenmux/stepfun/step-3.5-flash-free` | 256K | | | | | | — | — |
@@ -107,7 +106,6 @@ for await (const chunk of stream) {
107
106
  | `zenmux/x-ai/grok-4.2-fast-non-reasoning` | 2.0M | | | | | | $3 | $9 |
108
107
  | `zenmux/x-ai/grok-code-fast-1` | 256K | | | | | | $0.20 | $2 |
109
108
  | `zenmux/xiaomi/mimo-v2-flash` | 262K | | | | | | $0.10 | $0.30 |
110
- | `zenmux/xiaomi/mimo-v2-flash-free` | 262K | | | | | | — | — |
111
109
  | `zenmux/xiaomi/mimo-v2-omni` | 265K | | | | | | $0.40 | $2 |
112
110
  | `zenmux/xiaomi/mimo-v2-pro` | 1.0M | | | | | | $2 | $5 |
113
111
  | `zenmux/z-ai/glm-4.5` | 128K | | | | | | $0.35 | $2 |
@@ -0,0 +1,122 @@
1
+ # ArthurExporter
2
+
3
+ Sends Tracing data to [Arthur Engine](https://github.com/arthur-ai/arthur-engine) using OpenTelemetry and OpenInference semantic conventions.
4
+
5
+ ## Constructor
6
+
7
+ ```typescript
8
+ new ArthurExporter(config: ArthurExporterConfig)
9
+ ```
10
+
11
+ ## `ArthurExporterConfig`
12
+
13
+ ```typescript
14
+ type ArthurExporterConfig = Omit<OtelExporterConfig, 'provider' | 'exporter'> & {
15
+ apiKey?: string
16
+ endpoint?: string
17
+ taskId?: string
18
+ headers?: Record<string, string>
19
+ }
20
+ ```
21
+
22
+ Inherits from `OtelExporterConfig` (excluding `provider` and `exporter`), which includes:
23
+
24
+ - `timeout?: number` - Export timeout in milliseconds (default: 30000)
25
+ - `batchSize?: number` - Number of spans per batch (default: 512)
26
+ - `logLevel?: LogLevel | 'debug' | 'info' | 'warn' | 'error'` - Log level (default: WARN)
27
+ - `resourceAttributes?: Record<string, any>` - Custom resource attributes
28
+
29
+ ### Metadata passthrough
30
+
31
+ Non-reserved span attributes are serialized into the OpenInference `metadata` payload. Add them via `tracingOptions.metadata` (e.g., `companyId`, `tier`). Reserved fields such as `input`, `output`, `sessionId`, thread/user IDs, and OpenInference IDs are excluded automatically.
32
+
33
+ ## Methods
34
+
35
+ ### `exportTracingEvent`
36
+
37
+ ```typescript
38
+ async exportTracingEvent(event: TracingEvent): Promise<void>
39
+ ```
40
+
41
+ Exports a tracing event to the configured endpoint.
42
+
43
+ ### export
44
+
45
+ ```typescript
46
+ async export(spans: ReadOnlySpan[]): Promise<void>
47
+ ```
48
+
49
+ Batch exports spans using OpenTelemetry with OpenInference semantic conventions.
50
+
51
+ ### flush
52
+
53
+ ```typescript
54
+ async flush(): Promise<void>
55
+ ```
56
+
57
+ Force flushes any buffered spans to the configured endpoint without shutting down the exporter. Useful in serverless environments where you need to ensure spans are exported before the runtime terminates.
58
+
59
+ ### shutdown
60
+
61
+ ```typescript
62
+ async shutdown(): Promise<void>
63
+ ```
64
+
65
+ Flushes pending data and shuts down the client.
66
+
67
+ ## Usage
68
+
69
+ ### Zero-Config (using environment variables)
70
+
71
+ ```typescript
72
+ import { ArthurExporter } from '@mastra/arthur'
73
+
74
+ // Set ARTHUR_API_KEY and ARTHUR_BASE_URL (optionally ARTHUR_TASK_ID)
75
+ const exporter = new ArthurExporter()
76
+ ```
77
+
78
+ ### Explicit Configuration
79
+
80
+ ```typescript
81
+ import { ArthurExporter } from '@mastra/arthur'
82
+
83
+ const exporter = new ArthurExporter({
84
+ apiKey: process.env.ARTHUR_API_KEY!,
85
+ endpoint: 'http://localhost:3030',
86
+ taskId: process.env.ARTHUR_TASK_ID, // Optional
87
+ })
88
+ ```
89
+
90
+ ## `OpenInference` semantic conventions
91
+
92
+ The ArthurExporter implements [OpenInference Semantic Conventions](https://github.com/Arize-ai/openinference/tree/main/spec) for generative AI applications, providing standardized trace structure across different observability platforms.
93
+
94
+ ## Tags support
95
+
96
+ The ArthurExporter supports trace tagging for categorization and filtering. Tags are only applied to root spans and are mapped to the native OpenInference `tag.tags` semantic convention.
97
+
98
+ ### Usage
99
+
100
+ ```typescript
101
+ const result = await agent.generate('Hello', {
102
+ tracingOptions: {
103
+ tags: ['production', 'experiment-v2', 'user-request'],
104
+ },
105
+ })
106
+ ```
107
+
108
+ ### How Tags Are Stored
109
+
110
+ Tags are stored using the OpenInference `tag.tags` attribute:
111
+
112
+ ```json
113
+ {
114
+ "tag.tags": ["production", "experiment-v2", "user-request"]
115
+ }
116
+ ```
117
+
118
+ ## Related
119
+
120
+ - [ArthurExporter Documentation](https://mastra.ai/docs/observability/tracing/exporters/arthur)
121
+ - [Arthur Engine documentation](https://docs.arthur.ai/)
122
+ - [Arthur Engine repository](https://github.com/arthur-ai/arthur-engine)
@@ -16,7 +16,8 @@ interface LangfuseExporterConfig extends BaseExporterConfig {
16
16
  secretKey?: string
17
17
  baseUrl?: string
18
18
  realtime?: boolean
19
- options?: any
19
+ environment?: string
20
+ release?: string
20
21
  }
21
22
  ```
22
23
 
@@ -25,23 +26,17 @@ Extends `BaseExporterConfig`, which includes:
25
26
  - `logger?: IMastraLogger` - Logger instance
26
27
  - `logLevel?: LogLevel | 'debug' | 'info' | 'warn' | 'error'` - Log level (default: INFO)
27
28
 
28
- ## Methods
29
+ ## Properties
29
30
 
30
- ### `exportTracingEvent`
31
+ ### `client`
31
32
 
32
33
  ```typescript
33
- async exportTracingEvent(event: TracingEvent): Promise<void>
34
+ get client(): LangfuseClient | undefined
34
35
  ```
35
36
 
36
- Exports a tracing event to Langfuse.
37
-
38
- ### export
39
-
40
- ```typescript
41
- async export(spans: ReadOnlySpan[]): Promise<void>
42
- ```
37
+ The `LangfuseClient` instance for advanced Langfuse features. Returns `undefined` when the exporter is disabled (missing credentials). Use this for prompt management, evaluations, datasets, and direct API access.
43
38
 
44
- Batch exports spans to Langfuse.
39
+ ## Methods
45
40
 
46
41
  ### flush
47
42
 
@@ -49,7 +44,7 @@ Batch exports spans to Langfuse.
49
44
  async flush(): Promise<void>
50
45
  ```
51
46
 
52
- Force flushes any buffered spans to Langfuse without shutting down the exporter. Useful in serverless environments where you need to ensure spans are exported before the runtime terminates.
47
+ Force flushes any buffered spans and scores to Langfuse without shutting down. Useful in serverless environments where you need to ensure data is exported before the runtime terminates.
53
48
 
54
49
  ### shutdown
55
50
 
@@ -57,7 +52,7 @@ Force flushes any buffered spans to Langfuse without shutting down the exporter.
57
52
  async shutdown(): Promise<void>
58
53
  ```
59
54
 
60
- Flushes pending data and shuts down the client.
55
+ Flushes pending data and shuts down both the span processor and the client.
61
56
 
62
57
  ## Usage
63
58
 
@@ -85,10 +80,11 @@ const exporter = new LangfuseExporter({
85
80
 
86
81
  ## Span mapping
87
82
 
88
- Root spans → Langfuse traces
89
- - `MODEL_GENERATION` spans → Langfuse generations
83
+ Mastra spans are converted to OTel format and sent to Langfuse via `LangfuseSpanProcessor`. Langfuse automatically maps spans based on their `gen_ai.*` attributes:
84
+
85
+ - Spans with a `model` attribute → Langfuse generations
90
86
  - All other spans → Langfuse spans
91
- Event spans → Langfuse events
87
+ - Parent-child relationships are preserved via OTel trace/span IDs
92
88
 
93
89
  ## Prompt linking
94
90
 
@@ -96,22 +92,21 @@ Link LLM generations to [Langfuse Prompt Management](https://langfuse.com/docs/p
96
92
 
97
93
  ```typescript
98
94
  import { buildTracingOptions } from '@mastra/observability'
99
- import { withLangfusePrompt } from '@mastra/langfuse'
100
- import { Langfuse } from 'langfuse'
95
+ import { LangfuseExporter, withLangfusePrompt } from '@mastra/langfuse'
101
96
 
102
- const langfuse = new Langfuse({
103
- publicKey: process.env.LANGFUSE_PUBLIC_KEY!,
104
- secretKey: process.env.LANGFUSE_SECRET_KEY!,
105
- })
97
+ const exporter = new LangfuseExporter()
106
98
 
107
- const prompt = await langfuse.getPrompt('customer-support')
99
+ // Fetch prompt via the Langfuse client
100
+ const prompt = await exporter.client.prompt.get('customer-support', { type: 'text' })
108
101
 
109
102
  const agent = new Agent({
110
103
  name: 'support-agent',
111
- instructions: prompt.prompt,
104
+ instructions: prompt.compile(),
112
105
  model: 'openai/gpt-5.4',
113
106
  defaultGenerateOptions: {
114
- tracingOptions: buildTracingOptions(withLangfusePrompt(prompt)),
107
+ tracingOptions: buildTracingOptions(
108
+ withLangfusePrompt({ name: prompt.name, version: prompt.version }),
109
+ ),
115
110
  },
116
111
  })
117
112
  ```
@@ -123,12 +118,8 @@ const agent = new Agent({
123
118
  Adds Langfuse prompt metadata to tracing options.
124
119
 
125
120
  ```typescript
126
- // With Langfuse SDK prompt object
127
- withLangfusePrompt(prompt)
128
-
129
- // With manual fields
121
+ // Link by name and version (required for Langfuse v5)
130
122
  withLangfusePrompt({ name: 'my-prompt', version: 1 })
131
- withLangfusePrompt({ id: 'prompt-uuid' })
132
123
  ```
133
124
 
134
- When `metadata.langfuse.prompt` is set on a `MODEL_GENERATION` span (with either `id` alone, or `name` + `version`), the exporter automatically links the generation to the prompt in Langfuse.
125
+ The prompt metadata is passed through as span attributes and Langfuse links the generation to the corresponding prompt.
@@ -4,10 +4,10 @@ This reference provides comprehensive information about Mastra templates, includ
4
4
 
5
5
  Mastra templates are pre-built project structures that demonstrate specific use cases and patterns. They provide:
6
6
 
7
- - **Working examples** - Complete, functional Mastra applications
8
- - **Best practices** - Proper project structure and coding conventions
9
- - **Educational resources** - Learn Mastra patterns through real implementations
10
- - **Quickstarts** - Bootstrap projects faster than building from scratch
7
+ - **Working examples**: Complete, functional Mastra applications
8
+ - **Best practices**: Proper project structure and coding conventions
9
+ - **Educational resources**: Learn Mastra patterns through real implementations
10
+ - **Quickstarts**: Bootstrap projects faster than building from scratch
11
11
 
12
12
  ## Using templates
13
13
 
@@ -15,10 +15,30 @@ Mastra templates are pre-built project structures that demonstrate specific use
15
15
 
16
16
  Install a template using the `create-mastra` command:
17
17
 
18
- ```bash
18
+ **npm**:
19
+
20
+ ```sh
19
21
  npx create-mastra@latest --template template-name
20
22
  ```
21
23
 
24
+ **pnpm**:
25
+
26
+ ```sh
27
+ pnpm dlx create-mastra@latest --template template-name
28
+ ```
29
+
30
+ **Yarn**:
31
+
32
+ ```sh
33
+ yarn dlx create-mastra@latest --template template-name
34
+ ```
35
+
36
+ **Bun**:
37
+
38
+ ```sh
39
+ bun x create-mastra@latest --template template-name
40
+ ```
41
+
22
42
  This creates a complete project with all necessary code and configuration.
23
43
 
24
44
  ### Setup Process
@@ -41,20 +61,74 @@ After installation:
41
61
 
42
62
  3. **Install dependencies** (if not done automatically):
43
63
 
44
- ```bash
64
+ **npm**:
65
+
66
+ ```sh
45
67
  npm install
46
68
  ```
47
69
 
70
+ **pnpm**:
71
+
72
+ ```sh
73
+ pnpm install
74
+ ```
75
+
76
+ **Yarn**:
77
+
78
+ ```sh
79
+ yarn install
80
+ ```
81
+
82
+ **Bun**:
83
+
84
+ ```sh
85
+ bun install
86
+ ```
87
+
48
88
  4. **Start development server**:
49
89
 
50
- ```bash
90
+ **npm**:
91
+
92
+ ```sh
51
93
  npm run dev
52
94
  ```
53
95
 
54
- ### Template Structure
96
+ **pnpm**:
97
+
98
+ ```sh
99
+ pnpm run dev
100
+ ```
101
+
102
+ **Yarn**:
103
+
104
+ ```sh
105
+ yarn dev
106
+ ```
107
+
108
+ **Bun**:
109
+
110
+ ```sh
111
+ bun run dev
112
+ ```
113
+
114
+ ### Template structure
55
115
 
56
116
  All templates follow this standardized structure:
57
117
 
118
+ ```text
119
+ your-template/
120
+ ├── src/
121
+ │ └── mastra/
122
+ │ ├── agents/ # Agent definitions
123
+ │ ├── tools/ # Tool definitions
124
+ │ ├── workflows/ # Workflow definitions
125
+ │ └── index.ts # Main Mastra config
126
+ ├── .env.example # Required environment variables
127
+ ├── package.json
128
+ ├── tsconfig.json
129
+ └── README.md
130
+ ```
131
+
58
132
  ## Creating templates
59
133
 
60
134
  ### Requirements
@@ -127,12 +201,12 @@ const agent = new Agent({
127
201
 
128
202
  Templates must be:
129
203
 
130
- - **Single projects** - Not monorepos with multiple applications
131
- - **Framework-free** - No Next.js, Express, or other web framework boilerplate
132
- - **Mastra-focused** - Demonstrate Mastra functionality without additional layers
133
- - **Mergeable** - Structure code for seamless integration into existing projects
134
- - **Node.js compatible** - Support Node.js v22.13.0 and later
135
- - **ESM modules** - Use ES modules (`"type": "module"` in package.json)
204
+ - **Single projects**: Not monorepos with multiple applications
205
+ - **Framework-free**: No Next.js, Express, or other web framework boilerplate
206
+ - **Mastra-focused**: Demonstrate Mastra functionality without additional layers
207
+ - **Mergeable**: Structure code for seamless integration into existing projects
208
+ - **Node.js compatible**: Support Node.js v22.13.0 and later
209
+ - **ESM modules**: Use ES modules (`"type": "module"` in package.json)
136
210
 
137
211
  ### Documentation Requirements
138
212
 
package/CHANGELOG.md CHANGED
@@ -1,5 +1,54 @@
1
1
  # @mastra/mcp-docs-server
2
2
 
3
+ ## 1.1.22-alpha.12
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [[`ac7baf6`](https://github.com/mastra-ai/mastra/commit/ac7baf66ef1db15e03975ef4ebb02724f015a391), [`0df8321`](https://github.com/mastra-ai/mastra/commit/0df832196eeb2450ab77ce887e8553abdd44c5a6), [`61109b3`](https://github.com/mastra-ai/mastra/commit/61109b34feb0e38d54bee4b8ca83eb7345b1d557), [`33f1ead`](https://github.com/mastra-ai/mastra/commit/33f1eadfa19c86953f593478e5fa371093b33779)]:
8
+ - @mastra/core@1.23.0-alpha.8
9
+
10
+ ## 1.1.22-alpha.10
11
+
12
+ ### Patch Changes
13
+
14
+ - Updated dependencies [[`665477b`](https://github.com/mastra-ai/mastra/commit/665477bc104fd52cfef8e7610d7664781a70c220), [`4cc2755`](https://github.com/mastra-ai/mastra/commit/4cc2755a7194cb08720ff2ab4dffb4b4a5103dfd)]:
15
+ - @mastra/core@1.23.0-alpha.7
16
+
17
+ ## 1.1.22-alpha.8
18
+
19
+ ### Patch Changes
20
+
21
+ - Updated dependencies [[`7d6f521`](https://github.com/mastra-ai/mastra/commit/7d6f52164d0cca099f0b07cb2bba334360f1c8ab)]:
22
+ - @mastra/core@1.23.0-alpha.6
23
+
24
+ ## 1.1.22-alpha.7
25
+
26
+ ### Patch Changes
27
+
28
+ - Updated dependencies [[`1371703`](https://github.com/mastra-ai/mastra/commit/1371703835080450ef3f9aea58059a95d0da2e5a), [`98f8a8b`](https://github.com/mastra-ai/mastra/commit/98f8a8bdf5761b9982f3ad3acbe7f1cc3efa71f3)]:
29
+ - @mastra/core@1.23.0-alpha.5
30
+
31
+ ## 1.1.22-alpha.5
32
+
33
+ ### Patch Changes
34
+
35
+ - Updated dependencies [[`fff91cf`](https://github.com/mastra-ai/mastra/commit/fff91cf914de0e731578aacebffdeebef82f0440)]:
36
+ - @mastra/core@1.23.0-alpha.4
37
+
38
+ ## 1.1.22-alpha.3
39
+
40
+ ### Patch Changes
41
+
42
+ - Updated dependencies [[`1805ddc`](https://github.com/mastra-ai/mastra/commit/1805ddc9c9b3b14b63749735a13c05a45af43a80)]:
43
+ - @mastra/core@1.23.0-alpha.3
44
+
45
+ ## 1.1.22-alpha.2
46
+
47
+ ### Patch Changes
48
+
49
+ - Updated dependencies:
50
+ - @mastra/core@1.23.0-alpha.2
51
+
3
52
  ## 1.1.22-alpha.1
4
53
 
5
54
  ### Patch Changes
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mastra/mcp-docs-server",
3
- "version": "1.1.22-alpha.1",
3
+ "version": "1.1.22-alpha.12",
4
4
  "description": "MCP server for accessing Mastra.ai documentation, changelogs, and news.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -29,7 +29,7 @@
29
29
  "jsdom": "^26.1.0",
30
30
  "local-pkg": "^1.1.2",
31
31
  "zod": "^4.3.6",
32
- "@mastra/core": "1.23.0-alpha.1",
32
+ "@mastra/core": "1.23.0-alpha.8",
33
33
  "@mastra/mcp": "^1.4.1"
34
34
  },
35
35
  "devDependencies": {
@@ -47,8 +47,8 @@
47
47
  "typescript": "^5.9.3",
48
48
  "vitest": "4.0.18",
49
49
  "@internal/lint": "0.0.79",
50
- "@mastra/core": "1.23.0-alpha.1",
51
- "@internal/types-builder": "0.0.54"
50
+ "@internal/types-builder": "0.0.54",
51
+ "@mastra/core": "1.23.0-alpha.8"
52
52
  },
53
53
  "homepage": "https://mastra.ai",
54
54
  "repository": {