@mastra/mcp-docs-server 1.1.13 → 1.1.14-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/.docs/docs/deployment/studio.md +9 -24
  2. package/.docs/docs/getting-started/studio.md +28 -16
  3. package/.docs/docs/observability/tracing/exporters/braintrust.md +15 -0
  4. package/.docs/docs/server/auth.md +6 -7
  5. package/.docs/docs/server/custom-api-routes.md +56 -0
  6. package/.docs/docs/server/mastra-server.md +2 -2
  7. package/.docs/guides/deployment/cloudflare.md +1 -1
  8. package/.docs/models/gateways/openrouter.md +4 -1
  9. package/.docs/models/gateways/vercel.md +7 -1
  10. package/.docs/models/index.md +1 -1
  11. package/.docs/models/providers/anthropic.md +2 -2
  12. package/.docs/models/providers/baseten.md +12 -13
  13. package/.docs/models/providers/chutes.md +5 -5
  14. package/.docs/models/providers/deepinfra.md +30 -23
  15. package/.docs/models/providers/google.md +1 -1
  16. package/.docs/models/providers/kilo.md +342 -272
  17. package/.docs/models/providers/nano-gpt.md +36 -36
  18. package/.docs/models/providers/nebius.md +3 -2
  19. package/.docs/models/providers/perplexity-agent.md +19 -18
  20. package/.docs/models/providers/synthetic.md +1 -1
  21. package/.docs/models/providers/vultr.md +17 -12
  22. package/.docs/models/providers/zai-coding-plan.md +3 -2
  23. package/.docs/models/providers/zai.md +3 -2
  24. package/.docs/reference/agents/generate.md +2 -0
  25. package/.docs/reference/agents/network.md +2 -0
  26. package/.docs/reference/ai-sdk/chat-route.md +4 -0
  27. package/.docs/reference/configuration.md +4 -2
  28. package/.docs/reference/deployer/cloudflare.md +12 -1
  29. package/.docs/reference/processors/unicode-normalizer.md +1 -1
  30. package/.docs/reference/streaming/agents/stream.md +2 -0
  31. package/.docs/reference/workflows/run-methods/restart.md +2 -0
  32. package/.docs/reference/workflows/run-methods/resume.md +2 -0
  33. package/.docs/reference/workflows/run-methods/start.md +2 -0
  34. package/.docs/reference/workflows/run-methods/timeTravel.md +2 -0
  35. package/CHANGELOG.md +15 -0
  36. package/dist/prompts/migration.d.ts.map +1 -1
  37. package/dist/stdio.js.map +1 -1
  38. package/package.json +8 -8
@@ -15,7 +15,7 @@ const agent = new Agent({
15
15
  id: "my-agent",
16
16
  name: "My Agent",
17
17
  instructions: "You are a helpful assistant",
18
- model: "nano-gpt/Alibaba-NLP 2/Tongyi-DeepResearch-30B-A3B"
18
+ model: "nano-gpt/Alibaba-NLP/Tongyi-DeepResearch-30B-A3B"
19
19
  });
20
20
 
21
21
  // Generate a response
@@ -38,7 +38,7 @@ for await (const chunk of stream) {
38
38
  | `nano-gpt/aion-labs/aion-1.0` | 66K | | | | | | $4 | $8 |
39
39
  | `nano-gpt/aion-labs/aion-1.0-mini` | 131K | | | | | | $0.80 | $1 |
40
40
  | `nano-gpt/aion-labs/aion-rp-llama-3.1-8b` | 33K | | | | | | $0.20 | $0.20 |
41
- | `nano-gpt/Alibaba-NLP 2/Tongyi-DeepResearch-30B-A3B` | 128K | | | | | | $0.08 | $0.24 |
41
+ | `nano-gpt/Alibaba-NLP/Tongyi-DeepResearch-30B-A3B` | 128K | | | | | | $0.08 | $0.24 |
42
42
  | `nano-gpt/allenai/molmo-2-8b` | 37K | | | | | | $0.20 | $0.20 |
43
43
  | `nano-gpt/allenai/olmo-3-32b-think` | 128K | | | | | | $0.30 | $0.45 |
44
44
  | `nano-gpt/allenai/olmo-3.1-32b-instruct` | 66K | | | | | | $0.20 | $0.60 |
@@ -116,7 +116,7 @@ for await (const chunk of stream) {
116
116
  | `nano-gpt/cohere/command-r` | 128K | | | | | | $0.48 | $1 |
117
117
  | `nano-gpt/cohere/command-r-plus-08-2024` | 128K | | | | | | $3 | $14 |
118
118
  | `nano-gpt/command-a-reasoning-08-2025` | 256K | | | | | | $3 | $10 |
119
- | `nano-gpt/CrucibleLab 2/L3.3-70B-Loki-V2.0` | 16K | | | | | | $0.49 | $0.49 |
119
+ | `nano-gpt/CrucibleLab/L3.3-70B-Loki-V2.0` | 16K | | | | | | $0.49 | $0.49 |
120
120
  | `nano-gpt/deepclaude` | 128K | | | | | | $3 | $15 |
121
121
  | `nano-gpt/deepcogito/cogito-v1-preview-qwen-32B` | 128K | | | | | | $2 | $2 |
122
122
  | `nano-gpt/deepcogito/cogito-v2.1-671b` | 128K | | | | | | $1 | $1 |
@@ -141,7 +141,7 @@ for await (const chunk of stream) {
141
141
  | `nano-gpt/deepseek/deepseek-v3.2:thinking` | 163K | | | | | | $0.28 | $0.42 |
142
142
  | `nano-gpt/dmind/dmind-1` | 33K | | | | | | $0.30 | $0.60 |
143
143
  | `nano-gpt/dmind/dmind-1-mini` | 33K | | | | | | $0.20 | $0.40 |
144
- | `nano-gpt/Doctor-Shotgun 2/MS3.2-24B-Magnum-Diamond` | 16K | | | | | | $0.49 | $0.49 |
144
+ | `nano-gpt/Doctor-Shotgun/MS3.2-24B-Magnum-Diamond` | 16K | | | | | | $0.49 | $0.49 |
145
145
  | `nano-gpt/doubao-1-5-thinking-pro-250415` | 128K | | | | | | $0.60 | $2 |
146
146
  | `nano-gpt/doubao-1-5-thinking-pro-vision-250415` | 128K | | | | | | $0.60 | $2 |
147
147
  | `nano-gpt/doubao-1-5-thinking-vision-pro-250428` | 128K | | | | | | $0.55 | $1 |
@@ -157,8 +157,8 @@ for await (const chunk of stream) {
157
157
  | `nano-gpt/doubao-seed-2-0-mini-260215` | 256K | | | | | | $0.05 | $0.48 |
158
158
  | `nano-gpt/doubao-seed-2-0-pro-260215` | 256K | | | | | | $0.78 | $4 |
159
159
  | `nano-gpt/doubao-seed-code-preview-latest` | 256K | | | | | | $0.10 | $0.40 |
160
- | `nano-gpt/Envoid 2/Llama-3.05-Nemotron-Tenyxchat-Storybreaker-70B` | 16K | | | | | | $0.49 | $0.49 |
161
- | `nano-gpt/Envoid 2/Llama-3.05-NT-Storybreaker-Ministral-70B` | 16K | | | | | | $0.49 | $0.49 |
160
+ | `nano-gpt/Envoid/Llama-3.05-Nemotron-Tenyxchat-Storybreaker-70B` | 16K | | | | | | $0.49 | $0.49 |
161
+ | `nano-gpt/Envoid/Llama-3.05-NT-Storybreaker-Ministral-70B` | 16K | | | | | | $0.49 | $0.49 |
162
162
  | `nano-gpt/ernie-4.5-8k-preview` | 8K | | | | | | $0.66 | $3 |
163
163
  | `nano-gpt/ernie-4.5-turbo-128k` | 128K | | | | | | $0.13 | $0.55 |
164
164
  | `nano-gpt/ernie-4.5-turbo-vl-32k` | 32K | | | | | | $0.49 | $1 |
@@ -169,17 +169,17 @@ for await (const chunk of stream) {
169
169
  | `nano-gpt/ernie-x1-turbo-32k` | 32K | | | | | | $0.17 | $0.66 |
170
170
  | `nano-gpt/ernie-x1.1-preview` | 64K | | | | | | $0.15 | $0.60 |
171
171
  | `nano-gpt/essentialai/rnj-1-instruct` | 128K | | | | | | $0.15 | $0.15 |
172
- | `nano-gpt/EVA-UNIT-01 2/EVA-LLaMA-3.33-70B-v0.0` | 16K | | | | | | $2 | $2 |
173
- | `nano-gpt/EVA-UNIT-01 2/EVA-LLaMA-3.33-70B-v0.1` | 16K | | | | | | $2 | $2 |
174
- | `nano-gpt/EVA-UNIT-01 2/EVA-Qwen2.5-32B-v0.2` | 16K | | | | | | $0.80 | $0.80 |
175
- | `nano-gpt/EVA-UNIT-01 2/EVA-Qwen2.5-72B-v0.2` | 16K | | | | | | $0.80 | $0.80 |
172
+ | `nano-gpt/EVA-UNIT-01/EVA-LLaMA-3.33-70B-v0.0` | 16K | | | | | | $2 | $2 |
173
+ | `nano-gpt/EVA-UNIT-01/EVA-LLaMA-3.33-70B-v0.1` | 16K | | | | | | $2 | $2 |
174
+ | `nano-gpt/EVA-UNIT-01/EVA-Qwen2.5-32B-v0.2` | 16K | | | | | | $0.80 | $0.80 |
175
+ | `nano-gpt/EVA-UNIT-01/EVA-Qwen2.5-72B-v0.2` | 16K | | | | | | $0.80 | $0.80 |
176
176
  | `nano-gpt/exa-answer` | 4K | | | | | | $3 | $3 |
177
177
  | `nano-gpt/exa-research` | 8K | | | | | | $3 | $3 |
178
178
  | `nano-gpt/exa-research-pro` | 16K | | | | | | $3 | $3 |
179
179
  | `nano-gpt/failspy/Meta-Llama-3-70B-Instruct-abliterated-v3.5` | 8K | | | | | | $0.70 | $0.70 |
180
180
  | `nano-gpt/fastgpt` | 33K | | | | | | $8 | $8 |
181
181
  | `nano-gpt/featherless-ai/Qwerky-72B` | 32K | | | | | | $0.50 | $0.50 |
182
- | `nano-gpt/GalrionSoftworks 2/MN-LooseCannon-12B-v1` | 16K | | | | | | $0.49 | $0.49 |
182
+ | `nano-gpt/GalrionSoftworks/MN-LooseCannon-12B-v1` | 16K | | | | | | $0.49 | $0.49 |
183
183
  | `nano-gpt/gemini-2.0-flash-001` | 1.0M | | | | | | $0.10 | $0.41 |
184
184
  | `nano-gpt/gemini-2.0-flash-exp-image-generation` | 33K | | | | | | $0.20 | $0.80 |
185
185
  | `nano-gpt/gemini-2.0-flash-lite` | 1.0M | | | | | | $0.07 | $0.31 |
@@ -243,7 +243,7 @@ for await (const chunk of stream) {
243
243
  | `nano-gpt/grok-3-fast-beta` | 131K | | | | | | $5 | $25 |
244
244
  | `nano-gpt/grok-3-mini-beta` | 131K | | | | | | $0.30 | $0.50 |
245
245
  | `nano-gpt/grok-3-mini-fast-beta` | 131K | | | | | | $0.60 | $4 |
246
- | `nano-gpt/Gryphe 2/MythoMax-L2-13b` | 4K | | | | | | $0.10 | $0.10 |
246
+ | `nano-gpt/Gryphe/MythoMax-L2-13b` | 4K | | | | | | $0.10 | $0.10 |
247
247
  | `nano-gpt/hidream` | — | | | | | | — | — |
248
248
  | `nano-gpt/huihui-ai/DeepSeek-R1-Distill-Llama-70B-abliterated` | 16K | | | | | | $0.70 | $0.70 |
249
249
  | `nano-gpt/huihui-ai/DeepSeek-R1-Distill-Qwen-32B-abliterated` | 16K | | | | | | $1 | $1 |
@@ -252,7 +252,7 @@ for await (const chunk of stream) {
252
252
  | `nano-gpt/huihui-ai/Qwen2.5-32B-Instruct-abliterated` | 33K | | | | | | $0.70 | $0.70 |
253
253
  | `nano-gpt/hunyuan-t1-latest` | 256K | | | | | | $0.17 | $0.66 |
254
254
  | `nano-gpt/hunyuan-turbos-20250226` | 24K | | | | | | $0.19 | $0.37 |
255
- | `nano-gpt/Infermatic 2/MN-12B-Inferor-v0.0` | 16K | | | | | | $0.25 | $0.49 |
255
+ | `nano-gpt/Infermatic/MN-12B-Inferor-v0.0` | 16K | | | | | | $0.25 | $0.49 |
256
256
  | `nano-gpt/inflatebot/MN-12B-Mag-Mell-R1` | 16K | | | | | | $0.49 | $0.49 |
257
257
  | `nano-gpt/inflection/inflection-3-pi` | 8K | | | | | | $2 | $10 |
258
258
  | `nano-gpt/inflection/inflection-3-productivity` | 8K | | | | | | $2 | $10 |
@@ -267,7 +267,7 @@ for await (const chunk of stream) {
267
267
  | `nano-gpt/KAT-Coder-Pro-V1` | 256K | | | | | | $2 | $6 |
268
268
  | `nano-gpt/kimi-k2-instruct-fast` | 131K | | | | | | $0.10 | $2 |
269
269
  | `nano-gpt/kimi-thinking-preview` | 128K | | | | | | $31 | $31 |
270
- | `nano-gpt/LatitudeGames 2/Wayfarer-Large-70B-Llama-3.3` | 16K | | | | | | $0.70 | $0.70 |
270
+ | `nano-gpt/LatitudeGames/Wayfarer-Large-70B-Llama-3.3` | 16K | | | | | | $0.70 | $0.70 |
271
271
  | `nano-gpt/learnlm-1.5-pro-experimental` | 33K | | | | | | $4 | $11 |
272
272
  | `nano-gpt/Llama-3.3-70B-Anthrobomination` | 33K | | | | | | $0.31 | $0.31 |
273
273
  | `nano-gpt/Llama-3.3-70B-Argunaut-1-SFT` | 33K | | | | | | $0.31 | $0.31 |
@@ -312,9 +312,9 @@ for await (const chunk of stream) {
312
312
  | `nano-gpt/Llama-3.3+(3.1v3.3)-70B-Hanami-x1` | 33K | | | | | | $0.31 | $0.31 |
313
313
  | `nano-gpt/Llama-3.3+(3.1v3.3)-70B-New-Dawn-v1.1` | 33K | | | | | | $0.31 | $0.31 |
314
314
  | `nano-gpt/Llama-3.3+(3v3.3)-70B-TenyxChat-DaybreakStorywriter` | 33K | | | | | | $0.31 | $0.31 |
315
- | `nano-gpt/LLM360 2/K2-Think` | 128K | | | | | | $0.17 | $0.68 |
315
+ | `nano-gpt/LLM360/K2-Think` | 128K | | | | | | $0.17 | $0.68 |
316
316
  | `nano-gpt/Magistral-Small-2506` | 33K | | | | | | $0.40 | $1 |
317
- | `nano-gpt/MarinaraSpaghetti 2/NemoMix-Unleashed-12B` | 33K | | | | | | $0.49 | $0.49 |
317
+ | `nano-gpt/MarinaraSpaghetti/NemoMix-Unleashed-12B` | 33K | | | | | | $0.49 | $0.49 |
318
318
  | `nano-gpt/meganova-ai/manta-flash-1.0` | 16K | | | | | | $0.02 | $0.16 |
319
319
  | `nano-gpt/meganova-ai/manta-mini-1.0` | 8K | | | | | | $0.02 | $0.16 |
320
320
  | `nano-gpt/meganova-ai/manta-pro-1.0` | 33K | | | | | | $0.06 | $0.50 |
@@ -335,7 +335,7 @@ for await (const chunk of stream) {
335
335
  | `nano-gpt/minimax/minimax-m2-her` | 66K | | | | | | $0.30 | $1 |
336
336
  | `nano-gpt/minimax/minimax-m2.1` | 200K | | | | | | $0.33 | $1 |
337
337
  | `nano-gpt/minimax/minimax-m2.5` | 205K | | | | | | $0.30 | $1 |
338
- | `nano-gpt/MiniMaxAI 2/MiniMax-M1-80k` | 1.0M | | | | | | $0.61 | $2 |
338
+ | `nano-gpt/MiniMaxAI/MiniMax-M1-80k` | 1.0M | | | | | | $0.61 | $2 |
339
339
  | `nano-gpt/miromind-ai/mirothinker-v1.5-235b` | 33K | | | | | | $0.30 | $1 |
340
340
  | `nano-gpt/Mistral-Nemo-12B-Instruct-2407` | 16K | | | | | | $0.01 | $0.01 |
341
341
  | `nano-gpt/mistral-small-31-24b-instruct` | 128K | | | | | | $0.10 | $0.30 |
@@ -367,8 +367,8 @@ for await (const chunk of stream) {
367
367
  | `nano-gpt/moonshotai/kimi-k2-thinking-turbo-original` | 256K | | | | | | $1 | $8 |
368
368
  | `nano-gpt/moonshotai/kimi-k2.5` | 256K | | | | | | $0.30 | $2 |
369
369
  | `nano-gpt/moonshotai/kimi-k2.5:thinking` | 256K | | | | | | $0.30 | $2 |
370
- | `nano-gpt/NeverSleep 2/Llama-3-Lumimaid-70B-v0.1` | 16K | | | | | | $2 | $2 |
371
- | `nano-gpt/NeverSleep 2/Lumimaid-v0.2-70B` | 16K | | | | | | $1 | $2 |
370
+ | `nano-gpt/NeverSleep/Llama-3-Lumimaid-70B-v0.1` | 16K | | | | | | $2 | $2 |
371
+ | `nano-gpt/NeverSleep/Lumimaid-v0.2-70B` | 16K | | | | | | $1 | $2 |
372
372
  | `nano-gpt/nex-agi/deepseek-v3.1-nex-n1` | 128K | | | | | | $0.28 | $0.42 |
373
373
  | `nano-gpt/nothingiisreal/L3.1-70B-Celeste-V0.1-BF16` | 16K | | | | | | $0.49 | $0.49 |
374
374
  | `nano-gpt/NousResearch 2/DeepHermes-3-Mistral-24B-Preview` | 128K | | | | | | $0.30 | $0.30 |
@@ -448,13 +448,13 @@ for await (const chunk of stream) {
448
448
  | `nano-gpt/qwq-32b` | 128K | | | | | | $0.26 | $0.30 |
449
449
  | `nano-gpt/QwQ-32B-ArliAI-RpR-v1` | 33K | | | | | | $0.20 | $0.20 |
450
450
  | `nano-gpt/raifle/sorcererlm-8x22b` | 16K | | | | | | $5 | $5 |
451
- | `nano-gpt/ReadyArt 2/MS3.2-The-Omega-Directive-24B-Unslop-v2.0` | 16K | | | | | | $0.50 | $0.50 |
452
- | `nano-gpt/ReadyArt 2/The-Omega-Abomination-L-70B-v1.0` | 16K | | | | | | $0.70 | $0.95 |
453
- | `nano-gpt/Salesforce 2/Llama-xLAM-2-70b-fc-r` | 128K | | | | | | $3 | $3 |
454
- | `nano-gpt/Sao10K 2/L3-8B-Stheno-v3.2` | 16K | | | | | | $0.20 | $0.20 |
455
- | `nano-gpt/Sao10K 2/L3.1-70B-Euryale-v2.2` | 20K | | | | | | $0.31 | $0.36 |
456
- | `nano-gpt/Sao10K 2/L3.1-70B-Hanami-x1` | 16K | | | | | | $0.49 | $0.49 |
457
- | `nano-gpt/Sao10K 2/L3.3-70B-Euryale-v2.3` | 20K | | | | | | $0.49 | $0.49 |
451
+ | `nano-gpt/ReadyArt/MS3.2-The-Omega-Directive-24B-Unslop-v2.0` | 16K | | | | | | $0.50 | $0.50 |
452
+ | `nano-gpt/ReadyArt/The-Omega-Abomination-L-70B-v1.0` | 16K | | | | | | $0.70 | $0.95 |
453
+ | `nano-gpt/Salesforce/Llama-xLAM-2-70b-fc-r` | 128K | | | | | | $3 | $3 |
454
+ | `nano-gpt/Sao10K/L3-8B-Stheno-v3.2` | 16K | | | | | | $0.20 | $0.20 |
455
+ | `nano-gpt/Sao10K/L3.1-70B-Euryale-v2.2` | 20K | | | | | | $0.31 | $0.36 |
456
+ | `nano-gpt/Sao10K/L3.1-70B-Hanami-x1` | 16K | | | | | | $0.49 | $0.49 |
457
+ | `nano-gpt/Sao10K/L3.3-70B-Euryale-v2.3` | 20K | | | | | | $0.49 | $0.49 |
458
458
  | `nano-gpt/sarvan-medium` | 128K | | | | | | $0.25 | $0.75 |
459
459
  | `nano-gpt/shisa-ai/shisa-v2-llama3.3-70b` | 128K | | | | | | $0.50 | $0.50 |
460
460
  | `nano-gpt/shisa-ai/shisa-v2.1-llama3.3-70b` | 33K | | | | | | $0.50 | $0.50 |
@@ -465,12 +465,12 @@ for await (const chunk of stream) {
465
465
  | `nano-gpt/soob3123/amoral-gemma3-27B-v2` | 33K | | | | | | $0.30 | $0.30 |
466
466
  | `nano-gpt/soob3123/GrayLine-Qwen3-8B` | 16K | | | | | | $0.30 | $0.30 |
467
467
  | `nano-gpt/soob3123/Veiled-Calla-12B` | 33K | | | | | | $0.30 | $0.30 |
468
- | `nano-gpt/Steelskull 2/L3.3-Cu-Mai-R1-70b` | 16K | | | | | | $0.49 | $0.49 |
469
- | `nano-gpt/Steelskull 2/L3.3-Electra-R1-70b` | 16K | | | | | | $0.70 | $0.70 |
470
- | `nano-gpt/Steelskull 2/L3.3-MS-Evalebis-70b` | 16K | | | | | | $0.49 | $0.49 |
471
- | `nano-gpt/Steelskull 2/L3.3-MS-Evayale-70B` | 16K | | | | | | $0.49 | $0.49 |
472
- | `nano-gpt/Steelskull 2/L3.3-MS-Nevoria-70b` | 16K | | | | | | $0.49 | $0.49 |
473
- | `nano-gpt/Steelskull 2/L3.3-Nevoria-R1-70b` | 16K | | | | | | $0.49 | $0.49 |
468
+ | `nano-gpt/Steelskull/L3.3-Cu-Mai-R1-70b` | 16K | | | | | | $0.49 | $0.49 |
469
+ | `nano-gpt/Steelskull/L3.3-Electra-R1-70b` | 16K | | | | | | $0.70 | $0.70 |
470
+ | `nano-gpt/Steelskull/L3.3-MS-Evalebis-70b` | 16K | | | | | | $0.49 | $0.49 |
471
+ | `nano-gpt/Steelskull/L3.3-MS-Evayale-70B` | 16K | | | | | | $0.49 | $0.49 |
472
+ | `nano-gpt/Steelskull/L3.3-MS-Nevoria-70b` | 16K | | | | | | $0.49 | $0.49 |
473
+ | `nano-gpt/Steelskull/L3.3-Nevoria-R1-70b` | 16K | | | | | | $0.49 | $0.49 |
474
474
  | `nano-gpt/step-2-16k-exp` | 16K | | | | | | $7 | $20 |
475
475
  | `nano-gpt/step-2-mini` | 8K | | | | | | $0.20 | $0.41 |
476
476
  | `nano-gpt/step-3` | 66K | | | | | | $0.25 | $0.65 |
@@ -515,7 +515,7 @@ for await (const chunk of stream) {
515
515
  | `nano-gpt/THUDM/GLM-Z1-Rumination-32B-0414` | 32K | | | | | | $0.20 | $0.20 |
516
516
  | `nano-gpt/tngtech/DeepSeek-TNG-R1T2-Chimera` | 128K | | | | | | $0.31 | $0.31 |
517
517
  | `nano-gpt/tngtech/tng-r1t-chimera` | 128K | | | | | | $0.30 | $1 |
518
- | `nano-gpt/Tongyi-Zhiwen 2/QwenLong-L1-32B` | 128K | | | | | | $0.14 | $0.60 |
518
+ | `nano-gpt/Tongyi-Zhiwen/QwenLong-L1-32B` | 128K | | | | | | $0.14 | $0.60 |
519
519
  | `nano-gpt/undi95/remm-slerp-l2-13b` | 6K | | | | | | $0.80 | $1 |
520
520
  | `nano-gpt/universal-summarizer` | 33K | | | | | | $30 | $30 |
521
521
  | `nano-gpt/unsloth/gemma-3-12b-it` | 128K | | | | | | $0.27 | $0.27 |
@@ -527,7 +527,7 @@ for await (const chunk of stream) {
527
527
  | `nano-gpt/v0-1.5-md` | 200K | | | | | | $3 | $15 |
528
528
  | `nano-gpt/venice-uncensored` | 128K | | | | | | $0.40 | $0.40 |
529
529
  | `nano-gpt/venice-uncensored:web` | 80K | | | | | | $0.40 | $0.40 |
530
- | `nano-gpt/VongolaChouko 2/Starcannon-Unleashed-12B-v1.0` | 16K | | | | | | $0.49 | $0.49 |
530
+ | `nano-gpt/VongolaChouko/Starcannon-Unleashed-12B-v1.0` | 16K | | | | | | $0.49 | $0.49 |
531
531
  | `nano-gpt/x-ai/grok-4-07-09` | 256K | | | | | | $3 | $15 |
532
532
  | `nano-gpt/x-ai/grok-4-fast` | 2.0M | | | | | | $0.20 | $0.50 |
533
533
  | `nano-gpt/x-ai/grok-4-fast:thinking` | 2.0M | | | | | | $0.20 | $0.50 |
@@ -561,7 +561,7 @@ const agent = new Agent({
561
561
  name: "custom-agent",
562
562
  model: {
563
563
  url: "https://nano-gpt.com/api/v1",
564
- id: "nano-gpt/Alibaba-NLP 2/Tongyi-DeepResearch-30B-A3B",
564
+ id: "nano-gpt/Alibaba-NLP/Tongyi-DeepResearch-30B-A3B",
565
565
  apiKey: process.env.NANO_GPT_API_KEY,
566
566
  headers: {
567
567
  "X-Custom-Header": "value"
@@ -580,7 +580,7 @@ const agent = new Agent({
580
580
  const useAdvanced = requestContext.task === "complex";
581
581
  return useAdvanced
582
582
  ? "nano-gpt/zai-org/glm-5:thinking"
583
- : "nano-gpt/Alibaba-NLP 2/Tongyi-DeepResearch-30B-A3B";
583
+ : "nano-gpt/Alibaba-NLP/Tongyi-DeepResearch-30B-A3B";
584
584
  }
585
585
  });
586
586
  ```
@@ -1,6 +1,6 @@
1
1
  # ![Nebius Token Factory logo](https://models.dev/logos/nebius.svg)Nebius Token Factory
2
2
 
3
- Access 48 Nebius Token Factory models through Mastra's model router. Authentication is handled automatically using the `NEBIUS_API_KEY` environment variable.
3
+ Access 49 Nebius Token Factory models through Mastra's model router. Authentication is handled automatically using the `NEBIUS_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Nebius Token Factory documentation](https://docs.tokenfactory.nebius.com/).
6
6
 
@@ -57,10 +57,11 @@ for await (const chunk of stream) {
57
57
  | `nebius/moonshotai/Kimi-K2-Instruct` | 200K | | | | | | $0.50 | $2 |
58
58
  | `nebius/moonshotai/Kimi-K2-Thinking` | 128K | | | | | | $0.60 | $3 |
59
59
  | `nebius/moonshotai/Kimi-K2.5` | 256K | | | | | | $0.50 | $3 |
60
+ | `nebius/moonshotai/Kimi-K2.5-fast` | 256K | | | | | | $0.50 | $3 |
60
61
  | `nebius/NousResearch/Hermes-4-405B` | 128K | | | | | | $1 | $3 |
61
62
  | `nebius/NousResearch/Hermes-4-70B` | 128K | | | | | | $0.13 | $0.40 |
62
63
  | `nebius/nvidia/Llama-3_1-Nemotron-Ultra-253B-v1` | 128K | | | | | | $0.60 | $2 |
63
- | `nebius/nvidia/Nemotron-3-Super-120B-A12B` | 256K | | | | | | $0.30 | $0.90 |
64
+ | `nebius/nvidia/nemotron-3-super-120b-a12b` | 256K | | | | | | $0.30 | $0.90 |
64
65
  | `nebius/nvidia/Nemotron-Nano-V2-12b` | 32K | | | | | | $0.07 | $0.20 |
65
66
  | `nebius/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B` | 32K | | | | | | $0.06 | $0.24 |
66
67
  | `nebius/openai/gpt-oss-120b` | 128K | | | | | | $0.15 | $0.60 |
@@ -1,6 +1,6 @@
1
1
  # ![Perplexity Agent logo](https://models.dev/logos/perplexity-agent.svg)Perplexity Agent
2
2
 
3
- Access 15 Perplexity Agent models through Mastra's model router. Authentication is handled automatically using the `PERPLEXITY_API_KEY` environment variable.
3
+ Access 16 Perplexity Agent models through Mastra's model router. Authentication is handled automatically using the `PERPLEXITY_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Perplexity Agent documentation](https://docs.perplexity.ai/docs/agent-api/models).
6
6
 
@@ -32,23 +32,24 @@ for await (const chunk of stream) {
32
32
 
33
33
  ## Models
34
34
 
35
- | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
- | -------------------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
- | `perplexity-agent/anthropic/claude-haiku-4-5` | 200K | | | | | | $1 | $5 |
38
- | `perplexity-agent/anthropic/claude-opus-4-5` | 200K | | | | | | $5 | $25 |
39
- | `perplexity-agent/anthropic/claude-opus-4-6` | 200K | | | | | | $5 | $25 |
40
- | `perplexity-agent/anthropic/claude-sonnet-4-5` | 200K | | | | | | $3 | $15 |
41
- | `perplexity-agent/anthropic/claude-sonnet-4-6` | 200K | | | | | | $3 | $15 |
42
- | `perplexity-agent/google/gemini-2.5-flash` | 1.0M | | | | | | $0.30 | $3 |
43
- | `perplexity-agent/google/gemini-2.5-pro` | 1.0M | | | | | | $1 | $10 |
44
- | `perplexity-agent/google/gemini-3-flash-preview` | 1.0M | | | | | | $0.50 | $3 |
45
- | `perplexity-agent/google/gemini-3-pro-preview` | 1.0M | | | | | | $2 | $12 |
46
- | `perplexity-agent/google/gemini-3.1-pro-preview` | 1.0M | | | | | | $2 | $12 |
47
- | `perplexity-agent/openai/gpt-5-mini` | 400K | | | | | | $0.25 | $2 |
48
- | `perplexity-agent/openai/gpt-5.1` | 400K | | | | | | $1 | $10 |
49
- | `perplexity-agent/openai/gpt-5.2` | 400K | | | | | | $2 | $14 |
50
- | `perplexity-agent/perplexity/sonar` | 128K | | | | | | $0.25 | $3 |
51
- | `perplexity-agent/xai/grok-4-1-fast-non-reasoning` | 2.0M | | | | | | $0.20 | $0.50 |
35
+ | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
+ | ---------------------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
+ | `perplexity-agent/anthropic/claude-haiku-4-5` | 200K | | | | | | $1 | $5 |
38
+ | `perplexity-agent/anthropic/claude-opus-4-5` | 200K | | | | | | $5 | $25 |
39
+ | `perplexity-agent/anthropic/claude-opus-4-6` | 200K | | | | | | $5 | $25 |
40
+ | `perplexity-agent/anthropic/claude-sonnet-4-5` | 200K | | | | | | $3 | $15 |
41
+ | `perplexity-agent/anthropic/claude-sonnet-4-6` | 200K | | | | | | $3 | $15 |
42
+ | `perplexity-agent/google/gemini-2.5-flash` | 1.0M | | | | | | $0.30 | $3 |
43
+ | `perplexity-agent/google/gemini-2.5-pro` | 1.0M | | | | | | $1 | $10 |
44
+ | `perplexity-agent/google/gemini-3-flash-preview` | 1.0M | | | | | | $0.50 | $3 |
45
+ | `perplexity-agent/google/gemini-3.1-pro-preview` | 1.0M | | | | | | $2 | $12 |
46
+ | `perplexity-agent/nvidia/nemotron-3-super-120b-a12b` | 1.0M | | | | | | $0.25 | $3 |
47
+ | `perplexity-agent/openai/gpt-5-mini` | 400K | | | | | | $0.25 | $2 |
48
+ | `perplexity-agent/openai/gpt-5.1` | 400K | | | | | | $1 | $10 |
49
+ | `perplexity-agent/openai/gpt-5.2` | 400K | | | | | | $2 | $14 |
50
+ | `perplexity-agent/openai/gpt-5.4` | 1.1M | | | | | | $3 | $15 |
51
+ | `perplexity-agent/perplexity/sonar` | 128K | | | | | | $0.25 | $3 |
52
+ | `perplexity-agent/xai/grok-4-1-fast-non-reasoning` | 2.0M | | | | | | $0.20 | $0.50 |
52
53
 
53
54
  ## Advanced configuration
54
55
 
@@ -72,7 +72,7 @@ const agent = new Agent({
72
72
  id: "custom-agent",
73
73
  name: "custom-agent",
74
74
  model: {
75
- url: "https://api.synthetic.new/v1",
75
+ url: "https://api.synthetic.new/openai/v1",
76
76
  id: "synthetic/hf:MiniMaxAI/MiniMax-M2",
77
77
  apiKey: process.env.SYNTHETIC_API_KEY,
78
78
  headers: {
@@ -1,6 +1,6 @@
1
1
  # ![Vultr logo](https://models.dev/logos/vultr.svg)Vultr
2
2
 
3
- Access 5 Vultr models through Mastra's model router. Authentication is handled automatically using the `VULTR_API_KEY` environment variable.
3
+ Access 10 Vultr models through Mastra's model router. Authentication is handled automatically using the `VULTR_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Vultr documentation](https://api.vultrinference.com/).
6
6
 
@@ -15,7 +15,7 @@ const agent = new Agent({
15
15
  id: "my-agent",
16
16
  name: "My Agent",
17
17
  instructions: "You are a helpful assistant",
18
- model: "vultr/deepseek-r1-distill-llama-70b"
18
+ model: "vultr/DeepSeek-R1-Distill-Llama-70B"
19
19
  });
20
20
 
21
21
  // Generate a response
@@ -32,13 +32,18 @@ for await (const chunk of stream) {
32
32
 
33
33
  ## Models
34
34
 
35
- | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
- | ------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
- | `vultr/deepseek-r1-distill-llama-70b` | 122K | | | | | | $0.20 | $0.20 |
38
- | `vultr/deepseek-r1-distill-qwen-32b` | 122K | | | | | | $0.20 | $0.20 |
39
- | `vultr/gpt-oss-120b` | 122K | | | | | | $0.20 | $0.20 |
40
- | `vultr/kimi-k2-instruct` | 59K | | | | | | $0.20 | $0.20 |
41
- | `vultr/qwen2.5-coder-32b-instruct` | 13K | | | | | | $0.20 | $0.20 |
35
+ | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
+ | ----------------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
+ | `vultr/DeepSeek-R1-Distill-Llama-70B` | 130K | | | | | | $2 | $2 |
38
+ | `vultr/DeepSeek-R1-Distill-Qwen-32B` | 130K | | | | | | $0.30 | $0.30 |
39
+ | `vultr/DeepSeek-V3.2` | 163K | | | | | | $0.55 | $2 |
40
+ | `vultr/GLM-5-FP8` | 202K | | | | | | $0.85 | $3 |
41
+ | `vultr/gpt-oss-120b` | 130K | | | | | | $0.15 | $0.60 |
42
+ | `vultr/Kimi-K2.5` | 261K | | | | | | $0.55 | $3 |
43
+ | `vultr/Llama-3_1-Nemotron-Ultra-253B-v1` | 32K | | | | | | $0.55 | $2 |
44
+ | `vultr/MiniMax-M2.5` | 196K | | | | | | $0.30 | $1 |
45
+ | `vultr/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4` | 260K | | | | | | $0.20 | $0.80 |
46
+ | `vultr/Qwen2.5-Coder-32B-Instruct` | 15K | | | | | | $0.20 | $0.60 |
42
47
 
43
48
  ## Advanced configuration
44
49
 
@@ -50,7 +55,7 @@ const agent = new Agent({
50
55
  name: "custom-agent",
51
56
  model: {
52
57
  url: "https://api.vultrinference.com/v1",
53
- id: "vultr/deepseek-r1-distill-llama-70b",
58
+ id: "vultr/DeepSeek-R1-Distill-Llama-70B",
54
59
  apiKey: process.env.VULTR_API_KEY,
55
60
  headers: {
56
61
  "X-Custom-Header": "value"
@@ -68,8 +73,8 @@ const agent = new Agent({
68
73
  model: ({ requestContext }) => {
69
74
  const useAdvanced = requestContext.task === "complex";
70
75
  return useAdvanced
71
- ? "vultr/qwen2.5-coder-32b-instruct"
72
- : "vultr/deepseek-r1-distill-llama-70b";
76
+ ? "vultr/gpt-oss-120b"
77
+ : "vultr/DeepSeek-R1-Distill-Llama-70B";
73
78
  }
74
79
  });
75
80
  ```
@@ -1,6 +1,6 @@
1
1
  # ![Z.AI Coding Plan logo](https://models.dev/logos/zai-coding-plan.svg)Z.AI Coding Plan
2
2
 
3
- Access 10 Z.AI Coding Plan models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
3
+ Access 11 Z.AI Coding Plan models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Z.AI Coding Plan documentation](https://docs.z.ai/devpack/overview).
6
6
 
@@ -44,6 +44,7 @@ for await (const chunk of stream) {
44
44
  | `zai-coding-plan/glm-4.7-flash` | 200K | | | | | | — | — |
45
45
  | `zai-coding-plan/glm-4.7-flashx` | 200K | | | | | | $0.07 | $0.40 |
46
46
  | `zai-coding-plan/glm-5` | 205K | | | | | | — | — |
47
+ | `zai-coding-plan/glm-5-turbo` | 200K | | | | | | — | — |
47
48
 
48
49
  ## Advanced configuration
49
50
 
@@ -73,7 +74,7 @@ const agent = new Agent({
73
74
  model: ({ requestContext }) => {
74
75
  const useAdvanced = requestContext.task === "complex";
75
76
  return useAdvanced
76
- ? "zai-coding-plan/glm-5"
77
+ ? "zai-coding-plan/glm-5-turbo"
77
78
  : "zai-coding-plan/glm-4.5";
78
79
  }
79
80
  });
@@ -1,6 +1,6 @@
1
1
  # ![Z.AI logo](https://models.dev/logos/zai.svg)Z.AI
2
2
 
3
- Access 9 Z.AI models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
3
+ Access 10 Z.AI models through Mastra's model router. Authentication is handled automatically using the `ZHIPU_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Z.AI documentation](https://docs.z.ai/guides/overview/pricing).
6
6
 
@@ -43,6 +43,7 @@ for await (const chunk of stream) {
43
43
  | `zai/glm-4.7` | 205K | | | | | | $0.60 | $2 |
44
44
  | `zai/glm-4.7-flash` | 200K | | | | | | — | — |
45
45
  | `zai/glm-5` | 205K | | | | | | $1 | $3 |
46
+ | `zai/glm-5-turbo` | 200K | | | | | | $1 | $4 |
46
47
 
47
48
  ## Advanced configuration
48
49
 
@@ -72,7 +73,7 @@ const agent = new Agent({
72
73
  model: ({ requestContext }) => {
73
74
  const useAdvanced = requestContext.task === "complex";
74
75
  return useAdvanced
75
- ? "zai/glm-5"
76
+ ? "zai/glm-5-turbo"
76
77
  : "zai/glm-4.5";
77
78
  }
78
79
  });
@@ -303,6 +303,8 @@ console.log(`Remaining requests: ${remainingRequests}, Remaining tokens: ${remai
303
303
 
304
304
  **traceId** (`string`): The trace ID associated with this execution when Tracing is enabled. Use this to correlate logs and debug execution flow.
305
305
 
306
+ **spanId** (`string`): The root span ID associated with this execution when Tracing is enabled. Use this for span-level lookup and correlation.
307
+
306
308
  **messages** (`MastraDBMessage[]`): All messages from this execution including input, memory history, and response.
307
309
 
308
310
  **rememberedMessages** (`MastraDBMessage[]`): Only messages loaded from memory (conversation history).
@@ -114,6 +114,8 @@ await agent.network(`
114
114
 
115
115
  **options.traceId** (`string`): The trace ID associated with this execution when Tracing is enabled. Use this to correlate logs and debug execution flow.
116
116
 
117
+ **options.spanId** (`string`): The root span ID associated with this execution when Tracing is enabled. Use this for span-level lookup and correlation.
118
+
117
119
  **options.onStepFinish** (`(event: any) => Promise<void> | void`): Callback fired after each LLM step within a sub-agent execution. Receives step details including finish reason and token usage.
118
120
 
119
121
  **options.onError** (`({ error }: { error: Error | string }) => Promise<void> | void`): Callback fired when an error occurs during sub-agent execution.
@@ -4,6 +4,10 @@ Creates a chat route handler for streaming agent conversations using the AI SDK
4
4
 
5
5
  Use [`handleChatStream()`](https://mastra.ai/reference/ai-sdk/handle-chat-stream) if you need a framework-agnostic handler.
6
6
 
7
+ > **Disconnect behavior:** `chatRoute()` forwards the incoming request's `AbortSignal` to `agent.stream()`. If the client disconnects, Mastra aborts the in-flight generation.
8
+ >
9
+ > If you want the server to continue generation and persist the final response after disconnect, build a [custom API route](https://mastra.ai/docs/server/custom-api-routes) around `agent.stream()` and call `consumeStream()` on the returned `MastraModelOutput`.
10
+
7
11
  ## Usage example
8
12
 
9
13
  This example shows how to set up a chat route at the `/chat` endpoint that uses an agent with the ID `weatherAgent`.
@@ -488,6 +488,8 @@ You only need this option if you're importing uncompiled source code directly. I
488
488
 
489
489
  Mastra automatically detects workspace packages in monorepo setups and adds them to this list, so you typically only need to specify external packages that require transpilation.
490
490
 
491
+ Mastra also resolves `tsconfig.json` `baseUrl` and `paths` aliases during build. This includes ESM-style imports such as `~/utils/logger.js` that point to TypeScript source files.
492
+
491
493
  ```typescript
492
494
  import { Mastra } from '@mastra/core'
493
495
 
@@ -621,9 +623,9 @@ export const mastra = new Mastra({
621
623
  ### server.host
622
624
 
623
625
  **Type:** `string`\
624
- **Default:** `localhost`
626
+ **Default:** `localhost` (or `MASTRA_HOST` environment variable if set)
625
627
 
626
- Host address the Mastra development server binds to.
628
+ Host address the Mastra development server binds to. If the `MASTRA_HOST` environment variable is set, it takes precedence over the default.
627
629
 
628
630
  ```typescript
629
631
  import { Mastra } from '@mastra/core'
@@ -50,7 +50,6 @@ export const mastra = new Mastra({
50
50
  ],
51
51
  vars: {
52
52
  NODE_ENV: 'production',
53
- API_KEY: '<api-key>',
54
53
  },
55
54
  d1_databases: [
56
55
  {
@@ -74,6 +73,18 @@ export const mastra = new Mastra({
74
73
 
75
74
  The `CloudflareDeployer` constructor accepts the same configuration options as `wrangler.jsonc`. See the [Wrangler configuration documentation](https://developers.cloudflare.com/workers/wrangler/configuration/) for all available options.
76
75
 
76
+ ## Secrets
77
+
78
+ Environment variables from your `.env` file are **not** written to `wrangler.jsonc`. This prevents secrets from being committed to source control.
79
+
80
+ To make your `.env` secrets available to your Cloudflare Worker, upload them as [Cloudflare Secrets](https://developers.cloudflare.com/workers/configuration/secrets/):
81
+
82
+ ```bash
83
+ npx wrangler secret bulk .env
84
+ ```
85
+
86
+ Use `vars` in the `CloudflareDeployer` constructor only for non-sensitive configuration values like `NODE_ENV`.
87
+
77
88
  ## Build output
78
89
 
79
90
  After running `mastra build`, the deployer generates a `wrangler.jsonc` file conforming to Cloudflare's [wrangler configuration](https://developers.cloudflare.com/workers/wrangler/configuration/). It points to files inside `.mastra/output` so you need to run `mastra build` before deploying with Wrangler.
@@ -17,7 +17,7 @@ const processor = new UnicodeNormalizer({
17
17
 
18
18
  **options** (`Options`): Configuration options for Unicode text normalization
19
19
 
20
- **options.stripControlChars** (`boolean`): Whether to strip control characters. When enabled, removes control characters except , ,
20
+ **options.stripControlChars** (`boolean`): Whether to strip control characters. When enabled, removes control characters except `\t` (tab), `\n` (newline), and `\r` (carriage return)
21
21
 
22
22
  **options.preserveEmojis** (`boolean`): Whether to preserve emojis. When disabled, emojis may be removed if they contain control characters
23
23
 
@@ -212,6 +212,8 @@ const stream = await agent.stream('message for agent')
212
212
 
213
213
  **traceId** (`string`): The trace ID associated with this execution when Tracing is enabled. Use this to correlate logs and debug execution flow.
214
214
 
215
+ **spanId** (`string`): The root span ID associated with this execution when Tracing is enabled. Use this for span-level lookup and correlation.
216
+
215
217
  ## Extended usage example
216
218
 
217
219
  ### Mastra Format (Default)
@@ -38,6 +38,8 @@ const restartedResult = await run.restart()
38
38
 
39
39
  **traceId** (`string`): The trace ID associated with this execution when Tracing is enabled. Use this to correlate logs and debug execution flow.
40
40
 
41
+ **spanId** (`string`): The root span ID associated with this execution when Tracing is enabled. Use this for span-level lookup and correlation.
42
+
41
43
  ## Related
42
44
 
43
45
  - [Workflows overview](https://mastra.ai/docs/workflows/overview)
@@ -52,6 +52,8 @@ if (result.status === 'suspended') {
52
52
 
53
53
  **traceId** (`string`): The trace ID associated with this execution when Tracing is enabled. Use this to correlate logs and debug execution flow.
54
54
 
55
+ **spanId** (`string`): The root span ID associated with this execution when Tracing is enabled. Use this for span-level lookup and correlation.
56
+
55
57
  ## Extended usage example
56
58
 
57
59
  ```typescript
@@ -48,6 +48,8 @@ const result = await run.start({
48
48
 
49
49
  **traceId** (`string`): The trace ID associated with this execution when Tracing is enabled. Use this to correlate logs and debug execution flow.
50
50
 
51
+ **spanId** (`string`): The root span ID associated with this execution when Tracing is enabled. Use this for span-level lookup and correlation.
52
+
51
53
  ## Extended usage example
52
54
 
53
55
  ```typescript
@@ -59,6 +59,8 @@ const result = await run.timeTravel({
59
59
 
60
60
  **traceId** (`string`): The trace ID associated with this execution when Tracing is enabled. Use this to correlate logs and debug execution flow.
61
61
 
62
+ **spanId** (`string`): The root span ID associated with this execution when Tracing is enabled. Use this for span-level lookup and correlation.
63
+
62
64
  ## Extended usage examples
63
65
 
64
66
  ### Time travel with custom context
package/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
1
1
  # @mastra/mcp-docs-server
2
2
 
3
+ ## 1.1.14-alpha.1
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [[`b77aa19`](https://github.com/mastra-ai/mastra/commit/b77aa1981361c021f2c881bee8f0c703687f00da), [`dd6ca1c`](https://github.com/mastra-ai/mastra/commit/dd6ca1cdea3b8b6182f4cf61df41070ba0cc0deb), [`4cb4edf`](https://github.com/mastra-ai/mastra/commit/4cb4edf3c909d197ec356c1790d13270514ffef6)]:
8
+ - @mastra/core@1.13.3-alpha.1
9
+
10
+ ## 1.1.14-alpha.0
11
+
12
+ ### Patch Changes
13
+
14
+ - Updated dependencies [[`51970b3`](https://github.com/mastra-ai/mastra/commit/51970b3828494d59a8dd4df143b194d37d31e3f5), [`bbcbbce`](https://github.com/mastra-ai/mastra/commit/bbcbbce4f0e268053cbb11ca58350f5ceba15498), [`085e371`](https://github.com/mastra-ai/mastra/commit/085e3718a7d0fe9a210fe7dd1c867b9bdfe8d16b), [`ce26fe2`](https://github.com/mastra-ai/mastra/commit/ce26fe2166dd90254f8bee5776e55977143e97de), [`b26307f`](https://github.com/mastra-ai/mastra/commit/b26307f050df39629511b0e831b8fc26973ce8b1)]:
15
+ - @mastra/core@1.13.3-alpha.0
16
+ - @mastra/mcp@1.2.2-alpha.0
17
+
3
18
  ## 1.1.13
4
19
 
5
20
  ### Patch Changes
@@ -1 +1 @@
1
- {"version":3,"file":"migration.d.ts","sourceRoot":"","sources":["../../src/prompts/migration.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,aAAa,CAAC;AA8BpD;;GAEG;AACH,eAAO,MAAM,uBAAuB,EAAE,gBAmBrC,CAAC"}
1
+ {"version":3,"file":"migration.d.ts","sourceRoot":"","sources":["../../src/prompts/migration.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAgB,MAAM,aAAa,CAAC;AA8BlE;;GAEG;AACH,eAAO,MAAM,uBAAuB,EAAE,gBAmBrC,CAAC"}