@mastra/mcp-docs-server 1.0.0-beta.3 → 1.0.0-beta.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (219) hide show
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
  2. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
  3. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
  4. package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
  5. package/.docs/organized/changelogs/%40mastra%2Fauth.md +6 -0
  6. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
  7. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
  8. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
  9. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
  10. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
  11. package/.docs/organized/changelogs/%40mastra%2Fcore.md +370 -170
  12. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
  16. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
  17. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
  18. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
  19. package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
  20. package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
  21. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
  22. package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
  23. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
  24. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
  25. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
  26. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
  27. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
  28. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
  29. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
  30. package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
  31. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
  32. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
  33. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
  34. package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
  35. package/.docs/organized/changelogs/%40mastra%2Freact.md +80 -1
  36. package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
  37. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +36 -0
  38. package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
  39. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
  40. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
  41. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
  42. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
  43. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
  54. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
  55. package/.docs/organized/changelogs/create-mastra.md +201 -1
  56. package/.docs/organized/changelogs/mastra.md +201 -1
  57. package/.docs/organized/code-examples/memory-with-processors.md +1 -1
  58. package/.docs/organized/code-examples/quick-start.md +1 -1
  59. package/.docs/raw/agents/adding-voice.mdx +55 -9
  60. package/.docs/raw/agents/guardrails.mdx +19 -20
  61. package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +6 -5
  62. package/.docs/raw/agents/networks.mdx +1 -2
  63. package/.docs/raw/agents/overview.mdx +5 -5
  64. package/.docs/raw/agents/using-tools.mdx +4 -5
  65. package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
  66. package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
  67. package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
  68. package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
  69. package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
  70. package/.docs/raw/deployment/building-mastra.mdx +1 -1
  71. package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
  72. package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
  73. package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
  74. package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
  75. package/.docs/raw/deployment/cloud-providers/index.mdx +1 -1
  76. package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
  77. package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
  78. package/.docs/raw/deployment/overview.mdx +2 -2
  79. package/.docs/raw/deployment/web-framework.mdx +5 -5
  80. package/.docs/raw/evals/custom-scorers.mdx +3 -5
  81. package/.docs/raw/evals/overview.mdx +2 -3
  82. package/.docs/raw/getting-started/project-structure.mdx +1 -1
  83. package/.docs/raw/getting-started/start.mdx +72 -0
  84. package/.docs/raw/getting-started/studio.mdx +1 -1
  85. package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +105 -11
  86. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
  87. package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
  88. package/.docs/raw/guides/{guide → getting-started}/manual-install.mdx +1 -1
  89. package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
  90. package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
  91. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
  92. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
  93. package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
  94. package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
  95. package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
  96. package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
  97. package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
  98. package/.docs/raw/guides/guide/web-search.mdx +12 -10
  99. package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
  100. package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
  101. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +29 -0
  102. package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
  103. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +22 -0
  104. package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
  105. package/.docs/raw/index.mdx +2 -2
  106. package/.docs/raw/mcp/overview.mdx +3 -5
  107. package/.docs/raw/memory/memory-processors.mdx +1 -2
  108. package/.docs/raw/memory/semantic-recall.mdx +7 -7
  109. package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
  110. package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
  111. package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
  112. package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
  113. package/.docs/raw/memory/threads-and-resources.mdx +3 -3
  114. package/.docs/raw/memory/working-memory.mdx +4 -5
  115. package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
  116. package/.docs/raw/observability/overview.mdx +2 -2
  117. package/.docs/raw/observability/tracing/exporters/otel.mdx +21 -2
  118. package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
  119. package/.docs/raw/observability/tracing/overview.mdx +3 -2
  120. package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
  121. package/.docs/raw/rag/overview.mdx +3 -2
  122. package/.docs/raw/rag/retrieval.mdx +20 -32
  123. package/.docs/raw/reference/agents/agent.mdx +7 -10
  124. package/.docs/raw/reference/agents/generate.mdx +11 -92
  125. package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
  126. package/.docs/raw/reference/agents/getLLM.mdx +1 -1
  127. package/.docs/raw/reference/agents/network.mdx +5 -88
  128. package/.docs/raw/reference/cli/mastra.mdx +2 -1
  129. package/.docs/raw/reference/client-js/agents.mdx +3 -3
  130. package/.docs/raw/reference/core/getLogger.mdx +1 -1
  131. package/.docs/raw/reference/core/listLogs.mdx +1 -1
  132. package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
  133. package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
  134. package/.docs/raw/reference/core/setLogger.mdx +1 -1
  135. package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
  136. package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
  137. package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
  138. package/.docs/raw/reference/evals/bias.mdx +29 -87
  139. package/.docs/raw/reference/evals/completeness.mdx +31 -90
  140. package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
  141. package/.docs/raw/reference/evals/context-precision.mdx +28 -130
  142. package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
  143. package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
  144. package/.docs/raw/reference/evals/hallucination.mdx +28 -103
  145. package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
  146. package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
  147. package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
  148. package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
  149. package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
  150. package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
  151. package/.docs/raw/reference/evals/toxicity.mdx +29 -92
  152. package/.docs/raw/reference/memory/memory-class.mdx +5 -7
  153. package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
  154. package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
  155. package/.docs/raw/reference/processors/language-detector.mdx +1 -1
  156. package/.docs/raw/reference/processors/moderation-processor.mdx +2 -2
  157. package/.docs/raw/reference/processors/pii-detector.mdx +2 -2
  158. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +1 -1
  159. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -3
  160. package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
  161. package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
  162. package/.docs/raw/reference/rag/embeddings.mdx +5 -5
  163. package/.docs/raw/reference/rag/rerank.mdx +1 -2
  164. package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
  165. package/.docs/raw/reference/streaming/agents/stream.mdx +11 -93
  166. package/.docs/raw/reference/templates/overview.mdx +1 -4
  167. package/.docs/raw/reference/tools/client.mdx +1 -2
  168. package/.docs/raw/reference/tools/create-tool.mdx +132 -0
  169. package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
  170. package/.docs/raw/reference/tools/mcp-client.mdx +2 -4
  171. package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
  172. package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
  173. package/.docs/raw/reference/vectors/chroma.mdx +81 -1
  174. package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
  175. package/.docs/raw/reference/vectors/lance.mdx +38 -22
  176. package/.docs/raw/reference/vectors/libsql.mdx +35 -2
  177. package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
  178. package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
  179. package/.docs/raw/reference/vectors/pg.mdx +43 -36
  180. package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
  181. package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
  182. package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
  183. package/.docs/raw/reference/voice/composite-voice.mdx +71 -28
  184. package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
  185. package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
  186. package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
  187. package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
  188. package/.docs/raw/reference/voice/voice.close.mdx +1 -1
  189. package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
  190. package/.docs/raw/reference/voice/voice.listen.mdx +86 -52
  191. package/.docs/raw/reference/voice/voice.off.mdx +1 -1
  192. package/.docs/raw/reference/voice/voice.on.mdx +1 -1
  193. package/.docs/raw/reference/voice/voice.send.mdx +1 -1
  194. package/.docs/raw/reference/voice/voice.speak.mdx +75 -40
  195. package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
  196. package/.docs/raw/server-db/mastra-client.mdx +1 -2
  197. package/.docs/raw/streaming/overview.mdx +20 -9
  198. package/.docs/raw/streaming/tool-streaming.mdx +47 -4
  199. package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
  200. package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
  201. package/.docs/raw/voice/overview.mdx +87 -40
  202. package/.docs/raw/voice/speech-to-speech.mdx +4 -4
  203. package/.docs/raw/voice/speech-to-text.mdx +1 -2
  204. package/.docs/raw/voice/text-to-speech.mdx +1 -2
  205. package/.docs/raw/workflows/control-flow.mdx +180 -0
  206. package/.docs/raw/workflows/overview.mdx +1 -1
  207. package/CHANGELOG.md +17 -0
  208. package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
  209. package/dist/prepare-docs/package-changes.d.ts.map +1 -1
  210. package/dist/prepare-docs/prepare.js +1 -1
  211. package/dist/stdio.js +1 -1
  212. package/package.json +7 -7
  213. package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
  214. package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
  215. package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
  216. package/.docs/raw/getting-started/quickstart.mdx +0 -27
  217. package/.docs/raw/getting-started/templates.mdx +0 -73
  218. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
  219. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
@@ -130,7 +130,7 @@ import { ModerationProcessor } from "@mastra/core/processors";
130
130
  export const agent = new Agent({
131
131
  name: "moderated-agent",
132
132
  instructions: "You are a helpful assistant",
133
- model: "openai/gpt-4o-mini",
133
+ model: "openai/gpt-5.1",
134
134
  inputProcessors: [
135
135
  new ModerationProcessor({
136
136
  model: "openai/gpt-4.1-nano",
@@ -155,7 +155,7 @@ import { BatchPartsProcessor, ModerationProcessor } from "@mastra/core/processor
155
155
  export const agent = new Agent({
156
156
  name: "output-moderated-agent",
157
157
  instructions: "You are a helpful assistant",
158
- model: "openai/gpt-4o-mini",
158
+ model: "openai/gpt-5.1",
159
159
  outputProcessors: [
160
160
  // Batch stream parts first to reduce LLM calls
161
161
  new BatchPartsProcessor({
@@ -137,7 +137,7 @@ import { PIIDetector } from "@mastra/core/processors";
137
137
  export const agent = new Agent({
138
138
  name: "private-agent",
139
139
  instructions: "You are a helpful assistant",
140
- model: "openai/gpt-4o-mini",
140
+ model: "openai/gpt-5.1",
141
141
  inputProcessors: [
142
142
  new PIIDetector({
143
143
  model: "openai/gpt-4.1-nano",
@@ -164,7 +164,7 @@ import { BatchPartsProcessor, PIIDetector } from "@mastra/core/processors";
164
164
  export const agent = new Agent({
165
165
  name: "output-pii-agent",
166
166
  instructions: "You are a helpful assistant",
167
- model: "openai/gpt-4o-mini",
167
+ model: "openai/gpt-5.1",
168
168
  outputProcessors: [
169
169
  // Batch stream parts first to reduce LLM calls
170
170
  new BatchPartsProcessor({
@@ -115,7 +115,7 @@ import { PromptInjectionDetector } from "@mastra/core/processors";
115
115
  export const agent = new Agent({
116
116
  name: "secure-agent",
117
117
  instructions: "You are a helpful assistant",
118
- model: "openai/gpt-4o-mini",
118
+ model: "openai/gpt-5.1",
119
119
  inputProcessors: [
120
120
  new PromptInjectionDetector({
121
121
  model: "openai/gpt-4.1-nano",
@@ -10,11 +10,10 @@ The `SystemPromptScrubber` is an **output processor** that detects and handles s
10
10
  ## Usage example
11
11
 
12
12
  ```typescript copy
13
- import { openai } from "@ai-sdk/openai";
14
13
  import { SystemPromptScrubber } from "@mastra/core/processors";
15
14
 
16
15
  const processor = new SystemPromptScrubber({
17
- model: openai("gpt-4.1-nano"),
16
+ model: "openai/gpt-4.1-nano",
18
17
  strategy: "redact",
19
18
  redactionMethod: "mask",
20
19
  includeDetections: true
@@ -131,7 +130,7 @@ import { BatchPartsProcessor, SystemPromptScrubber } from "@mastra/core/processo
131
130
  export const agent = new Agent({
132
131
  name: "scrubbed-agent",
133
132
  instructions: "You are a helpful assistant",
134
- model: "openai/gpt-4o-mini",
133
+ model: "openai/gpt-5.1",
135
134
  outputProcessors: [
136
135
  // Batch stream parts first to reduce LLM calls
137
136
  new BatchPartsProcessor({
@@ -45,7 +45,7 @@ const processor = new TokenLimiterProcessor({
45
45
  {
46
46
  name: "encoding",
47
47
  type: "TiktokenBPE",
48
- description: "Optional encoding to use. Defaults to o200k_base which is used by gpt-4o",
48
+ description: "Optional encoding to use. Defaults to o200k_base which is used by gpt-5.1",
49
49
  isOptional: true,
50
50
  default: "o200k_base",
51
51
  },
@@ -124,7 +124,7 @@ import { TokenLimiterProcessor } from "@mastra/core/processors";
124
124
  export const agent = new Agent({
125
125
  name: "limited-agent",
126
126
  instructions: "You are a helpful assistant",
127
- model: "openai/gpt-4o-mini",
127
+ model: "openai/gpt-5.1",
128
128
  outputProcessors: [
129
129
  new TokenLimiterProcessor({
130
130
  limit: 1000,
@@ -101,7 +101,7 @@ import { UnicodeNormalizer } from "@mastra/core/processors";
101
101
  export const agent = new Agent({
102
102
  name: "normalized-agent",
103
103
  instructions: "You are a helpful assistant",
104
- model: "openai/gpt-4o-mini",
104
+ model: "openai/gpt-5.1",
105
105
  inputProcessors: [
106
106
  new UnicodeNormalizer({
107
107
  stripControlChars: true,
@@ -13,9 +13,10 @@ The `embed` function generates a vector embedding for a single text input:
13
13
 
14
14
  ```typescript
15
15
  import { embed } from "ai";
16
+ import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
16
17
 
17
18
  const result = await embed({
18
- model: openai.embedding("text-embedding-3-small"),
19
+ model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
19
20
  value: "Your text to embed",
20
21
  maxRetries: 2, // optional, defaults to 2
21
22
  });
@@ -80,7 +81,7 @@ For embedding multiple texts at once, use the `embedMany` function:
80
81
  import { embedMany } from "ai";
81
82
 
82
83
  const result = await embedMany({
83
- model: openai.embedding("text-embedding-3-small"),
84
+ model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
84
85
  values: ["First text", "Second text", "Third text"],
85
86
  maxRetries: 2, // optional, defaults to 2
86
87
  });
@@ -142,17 +143,16 @@ const result = await embedMany({
142
143
 
143
144
  ```typescript
144
145
  import { embed, embedMany } from "ai";
145
- import { openai } from "@ai-sdk/openai";
146
146
 
147
147
  // Single embedding
148
148
  const singleResult = await embed({
149
- model: openai.embedding("text-embedding-3-small"),
149
+ model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
150
150
  value: "What is the meaning of life?",
151
151
  });
152
152
 
153
153
  // Multiple embeddings
154
154
  const multipleResult = await embedMany({
155
- model: openai.embedding("text-embedding-3-small"),
155
+ model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
156
156
  values: [
157
157
  "First question about life",
158
158
  "Second question about universe",
@@ -19,10 +19,9 @@ function rerank(
19
19
  ## Usage Example
20
20
 
21
21
  ```typescript
22
- import { openai } from "@ai-sdk/openai";
23
22
  import { rerank } from "@mastra/rag";
24
23
 
25
- const model = openai("gpt-4o-mini");
24
+ const model = "openai/gpt-5.1";
26
25
 
27
26
  const rerankedResults = await rerank(
28
27
  vectorSearchResults,
@@ -19,7 +19,6 @@ function rerankWithScorer({
19
19
  ## Usage Example
20
20
 
21
21
  ```typescript
22
- import { openai } from "@ai-sdk/openai";
23
22
  import { rerankWithScorer as rerank, CohereRelevanceScorer } from "@mastra/rag";
24
23
 
25
24
  const scorer = new CohereRelevanceScorer("rerank-v3.5");
@@ -3,6 +3,8 @@ title: "Reference: Agent.stream() | Streaming"
3
3
  description: "Documentation for the `Agent.stream()` method in Mastra agents, which enables real-time streaming of responses with enhanced capabilities."
4
4
  ---
5
5
 
6
+ import { MODEL_SETTINGS_OBJECT } from "@site/src/components/ModelSettingsProperties";
7
+
6
8
  # Agent.stream()
7
9
 
8
10
  The `.stream()` method enables real-time streaming of responses from an agent with enhanced capabilities and format flexibility. This method accepts messages and optional streaming options, providing a next-generation streaming experience with support for both Mastra's native format and AI SDK v5 compatibility.
@@ -252,6 +254,13 @@ const aiSdkStream = await agent.stream("message for agent", {
252
254
  description:
253
255
  "Overrides the output processors set on the agent. Output processors that can modify or validate messages from the agent before they are returned to the user. Must implement either (or both) of the `processOutputResult` and `processOutputStream` functions.",
254
256
  },
257
+ {
258
+ name: "includeRawChunks",
259
+ type: "boolean",
260
+ isOptional: true,
261
+ description:
262
+ "Whether to include raw chunks in the stream output (not available on all model providers).",
263
+ },
255
264
  {
256
265
  name: "inputProcessors",
257
266
  type: "Processor[]",
@@ -396,91 +405,7 @@ const aiSdkStream = await agent.stream("message for agent", {
396
405
  },
397
406
  ],
398
407
  },
399
- {
400
- name: "modelSettings",
401
- type: "CallSettings",
402
- isOptional: true,
403
- description:
404
- "Model-specific settings like temperature, maxTokens, topP, etc. These are passed to the underlying language model.",
405
- properties: [
406
- {
407
- parameters: [
408
- {
409
- name: "temperature",
410
- type: "number",
411
- isOptional: true,
412
- description:
413
- "Controls randomness in the model's output. Higher values (e.g., 0.8) make the output more random, lower values (e.g., 0.2) make it more focused and deterministic.",
414
- },
415
- ],
416
- },
417
- {
418
- parameters: [
419
- {
420
- name: "maxRetries",
421
- type: "number",
422
- isOptional: true,
423
- description: "Maximum number of retries for failed requests.",
424
- },
425
- ],
426
- },
427
- {
428
- parameters: [
429
- {
430
- name: "topP",
431
- type: "number",
432
- isOptional: true,
433
- description:
434
- "Nucleus sampling. This is a number between 0 and 1. It is recommended to set either temperature or topP, but not both.",
435
- },
436
- ],
437
- },
438
- {
439
- parameters: [
440
- {
441
- name: "topK",
442
- type: "number",
443
- isOptional: true,
444
- description:
445
- "Only sample from the top K options for each subsequent token. Used to remove 'long tail' low probability responses.",
446
- },
447
- ],
448
- },
449
- {
450
- parameters: [
451
- {
452
- name: "presencePenalty",
453
- type: "number",
454
- isOptional: true,
455
- description:
456
- "Presence penalty setting. It affects the likelihood of the model to repeat information that is already in the prompt. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).",
457
- },
458
- ],
459
- },
460
- {
461
- parameters: [
462
- {
463
- name: "frequencyPenalty",
464
- type: "number",
465
- isOptional: true,
466
- description:
467
- "Frequency penalty setting. It affects the likelihood of the model to repeatedly use the same words or phrases. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).",
468
- },
469
- ],
470
- },
471
- {
472
- parameters: [
473
- {
474
- name: "stopSequences",
475
- type: "string[]",
476
- isOptional: true,
477
- description:
478
- "Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated.",
479
- },
480
- ],
481
- },
482
- ],
483
- },
408
+ MODEL_SETTINGS_OBJECT,
484
409
  {
485
410
  name: "threadId",
486
411
  type: "string",
@@ -661,13 +586,6 @@ const aiSdkStream = await agent.stream("message for agent", {
661
586
  },
662
587
  ],
663
588
  },
664
- {
665
- name: "maxTokens",
666
- type: "number",
667
- isOptional: true,
668
- description:
669
- "Condition(s) that determine when to stop the agent's execution. Can be a single condition or array of conditions.",
670
- },
671
589
  ]}
672
590
  />
673
591
 
@@ -790,7 +708,7 @@ await agent.stream("message for agent", {
790
708
  sentiment: z.enum(["positive", "negative", "neutral"]),
791
709
  confidence: z.number(),
792
710
  }),
793
- model: "openai/gpt-4o-mini",
711
+ model: "openai/gpt-5.1",
794
712
  errorStrategy: "warn",
795
713
  },
796
714
  // Output processors for streaming response validation
@@ -118,13 +118,10 @@ We recommend using OpenAI, Anthropic, or Google model providers for templates. C
118
118
 
119
119
  ```typescript title="src/mastra/agents/example-agent.ts"
120
120
  import { Agent } from "@mastra/core/agent";
121
- import { openai } from "@ai-sdk/openai";
122
- // Or use: import { anthropic } from '@ai-sdk/anthropic';
123
- // Or use: import { google } from '@ai-sdk/google';
124
121
 
125
122
  const agent = new Agent({
126
123
  name: "example-agent",
127
- model: openai("gpt-4"), // or anthropic('') or google('')
124
+ model: "openai/gpt-5.1", // or other provider strings
128
125
  instructions: "Your agent instructions here",
129
126
  // ... other configuration
130
127
  });
@@ -230,7 +230,6 @@ Returns an object mapping tool names to their corresponding Mastra tool implemen
230
230
  ```typescript
231
231
  import { Agent } from "@mastra/core/agent";
232
232
  import { MastraMCPClient } from "@mastra/mcp";
233
- import { openai } from "@ai-sdk/openai";
234
233
 
235
234
  // Initialize the MCP client using mcp/fetch as an example https://hub.docker.com/r/mcp/fetch
236
235
  // Visit https://github.com/docker/mcp-servers for other reference docker mcp servers
@@ -250,7 +249,7 @@ const agent = new Agent({
250
249
  name: "Fetch agent",
251
250
  instructions:
252
251
  "You are able to fetch data from URLs on demand and discuss the response data with the user.",
253
- model: openai("gpt-4o-mini"),
252
+ model: "openai/gpt-5.1",
254
253
  });
255
254
 
256
255
  try {
@@ -131,6 +131,34 @@ export const tool = createTool({
131
131
  },
132
132
  ],
133
133
  },
134
+ {
135
+ name: "onInputStart",
136
+ type: "function",
137
+ description:
138
+ "Optional callback invoked when the tool call input streaming begins. Receives `toolCallId`, `messages`, and `abortSignal`.",
139
+ isOptional: true,
140
+ },
141
+ {
142
+ name: "onInputDelta",
143
+ type: "function",
144
+ description:
145
+ "Optional callback invoked for each incremental chunk of input text as it streams in. Receives `inputTextDelta`, `toolCallId`, `messages`, and `abortSignal`.",
146
+ isOptional: true,
147
+ },
148
+ {
149
+ name: "onInputAvailable",
150
+ type: "function",
151
+ description:
152
+ "Optional callback invoked when the complete tool input is available and parsed. Receives the validated `input` object, `toolCallId`, `messages`, and `abortSignal`.",
153
+ isOptional: true,
154
+ },
155
+ {
156
+ name: "onOutput",
157
+ type: "function",
158
+ description:
159
+ "Optional callback invoked after the tool has successfully executed and returned output. Receives the tool's `output`, `toolCallId`, `messages`, and `abortSignal`.",
160
+ isOptional: true,
161
+ },
134
162
  ]}
135
163
  />
136
164
 
@@ -149,8 +177,112 @@ The `createTool()` function returns a `Tool` object.
149
177
  ]}
150
178
  />
151
179
 
180
+ ## Tool Lifecycle Hooks
181
+
182
+ Tools support lifecycle hooks that allow you to monitor and react to different stages of tool execution. These hooks are particularly useful for logging, analytics, validation, and real-time updates during streaming.
183
+
184
+ ### Available Hooks
185
+
186
+ #### onInputStart
187
+
188
+ Called when tool call input streaming begins, before any input data is received.
189
+
190
+ ```typescript
191
+ export const tool = createTool({
192
+ id: "example-tool",
193
+ description: "Example tool with hooks",
194
+ onInputStart: ({ toolCallId, messages, abortSignal }) => {
195
+ console.log(`Tool ${toolCallId} input streaming started`);
196
+ },
197
+ // ... other properties
198
+ });
199
+ ```
200
+
201
+ #### onInputDelta
202
+
203
+ Called for each incremental chunk of input text as it streams in. Useful for showing real-time progress or parsing partial JSON.
204
+
205
+ ```typescript
206
+ export const tool = createTool({
207
+ id: "example-tool",
208
+ description: "Example tool with hooks",
209
+ onInputDelta: ({ inputTextDelta, toolCallId, messages, abortSignal }) => {
210
+ console.log(`Received input chunk: ${inputTextDelta}`);
211
+ },
212
+ // ... other properties
213
+ });
214
+ ```
215
+
216
+ #### onInputAvailable
217
+
218
+ Called when the complete tool input is available and has been parsed and validated against the `inputSchema`.
219
+
220
+ ```typescript
221
+ export const tool = createTool({
222
+ id: "example-tool",
223
+ description: "Example tool with hooks",
224
+ inputSchema: z.object({
225
+ city: z.string(),
226
+ }),
227
+ onInputAvailable: ({ input, toolCallId, messages, abortSignal }) => {
228
+ console.log(`Tool received complete input:`, input);
229
+ // input is fully typed based on inputSchema
230
+ },
231
+ // ... other properties
232
+ });
233
+ ```
234
+
235
+ #### onOutput
236
+
237
+ Called after the tool has successfully executed and returned output. Useful for logging results, triggering follow-up actions, or analytics.
238
+
239
+ ```typescript
240
+ export const tool = createTool({
241
+ id: "example-tool",
242
+ description: "Example tool with hooks",
243
+ outputSchema: z.object({
244
+ result: z.string(),
245
+ }),
246
+ execute: async (input) => {
247
+ return { result: "Success" };
248
+ },
249
+ onOutput: ({ output, toolCallId, toolName, abortSignal }) => {
250
+ console.log(`${toolName} execution completed:`, output);
251
+ // output is fully typed based on outputSchema
252
+ },
253
+ });
254
+ ```
255
+
256
+ ### Hook Execution Order
257
+
258
+ For a typical streaming tool call, the hooks are invoked in this order:
259
+
260
+ 1. **onInputStart** - Input streaming begins
261
+ 2. **onInputDelta** - Called multiple times as chunks arrive
262
+ 3. **onInputAvailable** - Complete input is parsed and validated
263
+ 4. Tool's **execute** function runs
264
+ 5. **onOutput** - Tool has completed successfully
265
+
266
+ ### Hook Parameters
267
+
268
+ All hooks receive a parameter object with these common properties:
269
+
270
+ - `toolCallId` (string): Unique identifier for this specific tool call
271
+ - `abortSignal` (AbortSignal): Signal for detecting if the operation should be cancelled
272
+
273
+ Additionally:
274
+ - `onInputStart`, `onInputDelta`, and `onInputAvailable` receive `messages` (array): The conversation messages at the time of the tool call
275
+ - `onInputDelta` receives `inputTextDelta` (string): The incremental text chunk
276
+ - `onInputAvailable` receives `input`: The validated input data (typed according to `inputSchema`)
277
+ - `onOutput` receives `output`: The tool's return value (typed according to `outputSchema`) and `toolName` (string): The name of the tool
278
+
279
+ ### Error Handling
280
+
281
+ Hook errors are caught and logged automatically, but do not prevent tool execution from continuing. If a hook throws an error, it will be logged to the console but will not fail the tool call.
282
+
152
283
  ## Related
153
284
 
154
285
  - [MCP Overview](/docs/v1/mcp/overview)
155
286
  - [Using Tools with Agents](/docs/v1/agents/using-tools)
287
+ - [Tool Streaming](/docs/v1/streaming/tool-streaming)
156
288
  - [Request Context](/docs/v1/server-db/request-context#accessing-values-with-tools)
@@ -10,13 +10,13 @@ The `createGraphRAGTool()` creates a tool that enhances RAG by building a graph
10
10
  ## Usage Example
11
11
 
12
12
  ```typescript
13
- import { openai } from "@ai-sdk/openai";
14
13
  import { createGraphRAGTool } from "@mastra/rag";
14
+ import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
15
15
 
16
16
  const graphTool = createGraphRAGTool({
17
17
  vectorStoreName: "pinecone",
18
18
  indexName: "docs",
19
- model: openai.embedding("text-embedding-3-small"),
19
+ model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
20
20
  graphOptions: {
21
21
  dimension: 1536,
22
22
  threshold: 0.7,
@@ -193,7 +193,7 @@ The default description focuses on:
193
193
  const graphTool = createGraphRAGTool({
194
194
  vectorStoreName: "pinecone",
195
195
  indexName: "docs",
196
- model: openai.embedding("text-embedding-3-small"),
196
+ model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
197
197
  graphOptions: {
198
198
  dimension: 1536,
199
199
  threshold: 0.8, // Higher similarity threshold
@@ -209,7 +209,7 @@ const graphTool = createGraphRAGTool({
209
209
  const graphTool = createGraphRAGTool({
210
210
  vectorStoreName: "pinecone",
211
211
  indexName: "docs",
212
- model: openai.embedding("text-embedding-3-small"),
212
+ model: "openai/text-embedding-3-small",
213
213
  description:
214
214
  "Analyze document relationships to find complex patterns and connections in our company's historical data",
215
215
  });
@@ -223,7 +223,7 @@ This example shows how to customize the tool description for a specific use case
223
223
  const graphTool = createGraphRAGTool({
224
224
  vectorStoreName: "pinecone",
225
225
  indexName: "docs",
226
- model: openai.embedding("text-embedding-3-small"),
226
+ model: "openai/text-embedding-3-small",
227
227
  });
228
228
  ```
229
229
 
@@ -632,7 +632,6 @@ For tools where you have a single connection to the MCP server for your entire ap
632
632
  ```typescript
633
633
  import { MCPClient } from "@mastra/mcp";
634
634
  import { Agent } from "@mastra/core/agent";
635
- import { openai } from "@ai-sdk/openai";
636
635
 
637
636
  const mcp = new MCPClient({
638
637
  servers: {
@@ -657,7 +656,7 @@ const mcp = new MCPClient({
657
656
  const agent = new Agent({
658
657
  name: "Multi-tool Agent",
659
658
  instructions: "You have access to multiple tool servers.",
660
- model: openai("gpt-4"),
659
+ model: "openai/gpt-5.1",
661
660
  tools: await mcp.listTools(),
662
661
  });
663
662
 
@@ -707,13 +706,12 @@ When you need a new MCP connection for each user, use `listToolsets()` and add t
707
706
  ```typescript
708
707
  import { Agent } from "@mastra/core/agent";
709
708
  import { MCPClient } from "@mastra/mcp";
710
- import { openai } from "@ai-sdk/openai";
711
709
 
712
710
  // Create the agent first, without any tools
713
711
  const agent = new Agent({
714
712
  name: "Multi-tool Agent",
715
713
  instructions: "You help users check stocks and weather.",
716
- model: openai("gpt-4"),
714
+ model: "openai/gpt-5.1",
717
715
  });
718
716
 
719
717
  // Later, configure MCP with user-specific settings
@@ -16,7 +16,6 @@ It supports both [stdio (subprocess) and SSE (HTTP) MCP transports](https://mode
16
16
  To create a new `MCPServer`, you need to provide some basic information about your server, the tools it will offer, and optionally, any agents you want to expose as tools.
17
17
 
18
18
  ```typescript
19
- import { openai } from "@ai-sdk/openai";
20
19
  import { Agent } from "@mastra/core/agent";
21
20
  import { createTool } from "@mastra/core/tools";
22
21
  import { MCPServer } from "@mastra/mcp";
@@ -28,7 +27,7 @@ const myAgent = new Agent({
28
27
  name: "MyExampleAgent",
29
28
  description: "A generalist to help with basic questions.",
30
29
  instructions: "You are a helpful assistant.",
31
- model: openai("gpt-4o-mini"),
30
+ model: "openai/gpt-5.1",
32
31
  });
33
32
 
34
33
  const weatherTool = createTool({