@mastra/mcp-docs-server 1.0.0-beta.5 → 1.0.0-beta.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (163)
  1. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +9 -9
  2. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +67 -67
  3. package/.docs/organized/changelogs/%40mastra%2Fastra.md +10 -10
  4. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +12 -12
  5. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +57 -57
  6. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +110 -110
  7. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +57 -57
  8. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +57 -57
  9. package/.docs/organized/changelogs/%40mastra%2Fcodemod.md +6 -0
  10. package/.docs/organized/changelogs/%40mastra%2Fconvex.md +60 -0
  11. package/.docs/organized/changelogs/%40mastra%2Fcore.md +358 -358
  12. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +11 -11
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +24 -24
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +12 -12
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +79 -79
  16. package/.docs/organized/changelogs/%40mastra%2Fduckdb.md +42 -0
  17. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +57 -57
  18. package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +61 -0
  19. package/.docs/organized/changelogs/%40mastra%2Fevals.md +12 -12
  20. package/.docs/organized/changelogs/%40mastra%2Flance.md +57 -57
  21. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +55 -55
  22. package/.docs/organized/changelogs/%40mastra%2Floggers.md +12 -12
  23. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +17 -17
  24. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +125 -125
  25. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +36 -36
  26. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +57 -57
  27. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +57 -57
  28. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
  29. package/.docs/organized/changelogs/%40mastra%2Fpg.md +59 -59
  30. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +10 -10
  31. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +77 -77
  32. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +10 -10
  33. package/.docs/organized/changelogs/%40mastra%2Frag.md +43 -43
  34. package/.docs/organized/changelogs/%40mastra%2Freact.md +16 -0
  35. package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
  36. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
  37. package/.docs/organized/changelogs/%40mastra%2Fserver.md +113 -113
  38. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +10 -10
  39. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +57 -57
  40. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
  41. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +19 -19
  42. package/.docs/organized/changelogs/create-mastra.md +15 -15
  43. package/.docs/organized/changelogs/mastra.md +30 -30
  44. package/.docs/organized/code-examples/agui.md +1 -0
  45. package/.docs/organized/code-examples/ai-elements.md +1 -1
  46. package/.docs/organized/code-examples/ai-sdk-useChat.md +1 -1
  47. package/.docs/organized/code-examples/ai-sdk-v5.md +2 -1
  48. package/.docs/organized/code-examples/assistant-ui.md +1 -1
  49. package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +1 -1
  50. package/.docs/organized/code-examples/bird-checker-with-nextjs.md +1 -1
  51. package/.docs/organized/code-examples/crypto-chatbot.md +1 -1
  52. package/.docs/organized/code-examples/mcp-server-adapters.md +721 -0
  53. package/.docs/organized/code-examples/server-app-access.md +342 -0
  54. package/.docs/organized/code-examples/server-express-adapter.md +87 -0
  55. package/.docs/organized/code-examples/server-hono-adapter.md +85 -0
  56. package/.docs/raw/agents/agent-approval.mdx +189 -0
  57. package/.docs/raw/agents/guardrails.mdx +13 -9
  58. package/.docs/raw/agents/networks.mdx +1 -0
  59. package/.docs/raw/agents/overview.mdx +8 -152
  60. package/.docs/raw/agents/processors.mdx +279 -0
  61. package/.docs/raw/agents/structured-output.mdx +224 -0
  62. package/.docs/raw/deployment/cloud-providers/index.mdx +19 -26
  63. package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx +44 -13
  64. package/.docs/raw/evals/running-in-ci.mdx +0 -2
  65. package/.docs/raw/{guides/getting-started → getting-started}/manual-install.mdx +2 -2
  66. package/.docs/raw/getting-started/start.mdx +1 -1
  67. package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx +8 -0
  68. package/.docs/raw/guides/getting-started/quickstart.mdx +1 -1
  69. package/.docs/raw/guides/guide/whatsapp-chat-bot.mdx +421 -0
  70. package/.docs/raw/guides/index.mdx +3 -35
  71. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +11 -0
  72. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +37 -0
  73. package/.docs/raw/index.mdx +1 -1
  74. package/.docs/raw/memory/memory-processors.mdx +265 -79
  75. package/.docs/raw/memory/working-memory.mdx +11 -2
  76. package/.docs/raw/observability/overview.mdx +0 -1
  77. package/.docs/raw/observability/tracing/bridges/otel.mdx +200 -0
  78. package/.docs/raw/observability/tracing/exporters/arize.mdx +36 -0
  79. package/.docs/raw/observability/tracing/exporters/braintrust.mdx +19 -0
  80. package/.docs/raw/observability/tracing/exporters/langfuse.mdx +83 -0
  81. package/.docs/raw/observability/tracing/exporters/langsmith.mdx +12 -0
  82. package/.docs/raw/observability/tracing/exporters/otel.mdx +34 -22
  83. package/.docs/raw/observability/tracing/exporters/posthog.mdx +20 -0
  84. package/.docs/raw/observability/tracing/overview.mdx +76 -6
  85. package/.docs/raw/observability/tracing/processors/sensitive-data-filter.mdx +0 -1
  86. package/.docs/raw/rag/retrieval.mdx +23 -6
  87. package/.docs/raw/rag/vector-databases.mdx +93 -2
  88. package/.docs/raw/reference/agents/generate.mdx +55 -6
  89. package/.docs/raw/reference/agents/network.mdx +44 -0
  90. package/.docs/raw/reference/client-js/memory.mdx +43 -0
  91. package/.docs/raw/reference/client-js/workflows.mdx +92 -63
  92. package/.docs/raw/reference/deployer/netlify.mdx +1 -2
  93. package/.docs/raw/reference/evals/scorer-utils.mdx +362 -0
  94. package/.docs/raw/reference/index.mdx +1 -0
  95. package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +177 -0
  96. package/.docs/raw/reference/observability/tracing/configuration.mdx +0 -4
  97. package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +29 -0
  98. package/.docs/raw/reference/observability/tracing/exporters/langfuse.mdx +43 -0
  99. package/.docs/raw/reference/observability/tracing/exporters/langsmith.mdx +17 -1
  100. package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +33 -43
  101. package/.docs/raw/reference/observability/tracing/instances.mdx +0 -4
  102. package/.docs/raw/reference/observability/tracing/interfaces.mdx +29 -4
  103. package/.docs/raw/reference/observability/tracing/spans.mdx +0 -4
  104. package/.docs/raw/reference/processors/language-detector.mdx +9 -2
  105. package/.docs/raw/reference/processors/message-history-processor.mdx +131 -0
  106. package/.docs/raw/reference/processors/moderation-processor.mdx +10 -3
  107. package/.docs/raw/reference/processors/pii-detector.mdx +10 -3
  108. package/.docs/raw/reference/processors/processor-interface.mdx +502 -0
  109. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +9 -2
  110. package/.docs/raw/reference/processors/semantic-recall-processor.mdx +197 -0
  111. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -2
  112. package/.docs/raw/reference/processors/tool-call-filter.mdx +125 -0
  113. package/.docs/raw/reference/processors/working-memory-processor.mdx +221 -0
  114. package/.docs/raw/reference/server/create-route.mdx +314 -0
  115. package/.docs/raw/reference/server/express-adapter.mdx +193 -0
  116. package/.docs/raw/reference/server/hono-adapter.mdx +174 -0
  117. package/.docs/raw/reference/server/mastra-server.mdx +316 -0
  118. package/.docs/raw/reference/server/routes.mdx +250 -0
  119. package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
  120. package/.docs/raw/reference/storage/convex.mdx +164 -0
  121. package/.docs/raw/reference/storage/lance.mdx +33 -0
  122. package/.docs/raw/reference/storage/libsql.mdx +37 -0
  123. package/.docs/raw/reference/storage/mongodb.mdx +39 -0
  124. package/.docs/raw/reference/storage/mssql.mdx +37 -0
  125. package/.docs/raw/reference/storage/postgresql.mdx +37 -0
  126. package/.docs/raw/reference/streaming/ChunkType.mdx +1 -1
  127. package/.docs/raw/reference/streaming/agents/stream.mdx +56 -1
  128. package/.docs/raw/reference/streaming/workflows/observeStream.mdx +7 -9
  129. package/.docs/raw/reference/streaming/workflows/{resumeStreamVNext.mdx → resumeStream.mdx} +51 -11
  130. package/.docs/raw/reference/streaming/workflows/stream.mdx +83 -24
  131. package/.docs/raw/reference/streaming/workflows/timeTravelStream.mdx +170 -0
  132. package/.docs/raw/reference/tools/mcp-client.mdx +128 -18
  133. package/.docs/raw/reference/vectors/convex.mdx +429 -0
  134. package/.docs/raw/reference/vectors/duckdb.mdx +462 -0
  135. package/.docs/raw/reference/vectors/elasticsearch.mdx +310 -0
  136. package/.docs/raw/reference/voice/google.mdx +159 -20
  137. package/.docs/raw/reference/workflows/run-methods/restart.mdx +142 -0
  138. package/.docs/raw/reference/workflows/run-methods/resume.mdx +44 -0
  139. package/.docs/raw/reference/workflows/run-methods/start.mdx +44 -0
  140. package/.docs/raw/reference/workflows/run-methods/timeTravel.mdx +310 -0
  141. package/.docs/raw/reference/workflows/run.mdx +27 -5
  142. package/.docs/raw/reference/workflows/step.mdx +13 -0
  143. package/.docs/raw/reference/workflows/workflow.mdx +19 -0
  144. package/.docs/raw/server-db/custom-adapters.mdx +380 -0
  145. package/.docs/raw/server-db/mastra-server.mdx +16 -8
  146. package/.docs/raw/server-db/request-context.mdx +0 -1
  147. package/.docs/raw/server-db/server-adapters.mdx +286 -0
  148. package/.docs/raw/server-db/storage.mdx +11 -0
  149. package/.docs/raw/streaming/overview.mdx +6 -6
  150. package/.docs/raw/streaming/tool-streaming.mdx +2 -2
  151. package/.docs/raw/streaming/workflow-streaming.mdx +5 -11
  152. package/.docs/raw/workflows/error-handling.mdx +1 -0
  153. package/.docs/raw/workflows/human-in-the-loop.mdx +4 -4
  154. package/.docs/raw/workflows/overview.mdx +56 -44
  155. package/.docs/raw/workflows/snapshots.mdx +1 -0
  156. package/.docs/raw/workflows/suspend-and-resume.mdx +85 -16
  157. package/.docs/raw/workflows/time-travel.mdx +313 -0
  158. package/.docs/raw/workflows/workflow-state.mdx +191 -0
  159. package/CHANGELOG.md +16 -0
  160. package/package.json +4 -4
  161. package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +0 -91
  162. package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +0 -47
  163. package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +0 -153
@@ -1,502 +1,502 @@
  # @mastra/core

- ## 1.0.0-beta.5
+ ## 1.0.0-beta.7

- ### Patch Changes
+ ### Minor Changes

- - Add Azure OpenAI gateway ([#9990](https://github.com/mastra-ai/mastra/pull/9990))
+ - Add `disableInit` option to all storage adapters ([#10851](https://github.com/mastra-ai/mastra/pull/10851))

- The Azure OpenAI gateway supports three configuration modes:
- 1. **Static deployments**: Provide deployment names from Azure Portal
- 2. **Dynamic discovery**: Query Azure Management API for available deployments
- 3. **Manual**: Specify deployment names when creating agents
-
- ## Usage
+ Adds a new `disableInit` config option to all storage providers that allows users to disable automatic table creation/migrations at runtime. This is useful for CI/CD pipelines where you want to run migrations during deployment with elevated credentials, then run the application with `disableInit: true` so it doesn't attempt schema changes at runtime.

  ```typescript
- import { Mastra } from '@mastra/core';
- import { AzureOpenAIGateway } from '@mastra/core/llm';
-
- // Static mode (recommended)
- export const mastra = new Mastra({
- gateways: [
- new AzureOpenAIGateway({
- resourceName: process.env.AZURE_RESOURCE_NAME!,
- apiKey: process.env.AZURE_API_KEY!,
- deployments: ['gpt-4-prod', 'gpt-35-turbo-dev'],
- }),
- ],
+ // CI/CD script - run migrations
+ const storage = new PostgresStore({
+ connectionString: DATABASE_URL,
+ id: 'pg-storage',
  });
+ await storage.init();

- // Dynamic discovery mode
- export const mastra = new Mastra({
- gateways: [
- new AzureOpenAIGateway({
- resourceName: process.env.AZURE_RESOURCE_NAME!,
- apiKey: process.env.AZURE_API_KEY!,
- management: {
- tenantId: process.env.AZURE_TENANT_ID!,
- clientId: process.env.AZURE_CLIENT_ID!,
- clientSecret: process.env.AZURE_CLIENT_SECRET!,
- subscriptionId: process.env.AZURE_SUBSCRIPTION_ID!,
- resourceGroup: 'my-resource-group',
- },
- }),
- ],
+ // Runtime - skip auto-init
+ const storage = new PostgresStore({
+ connectionString: DATABASE_URL,
+ id: 'pg-storage',
+ disableInit: true,
  });
+ ```

- // Use Azure OpenAI models
- const agent = new Agent({
- model: 'azure-openai/gpt-4-deployment',
- instructions: 'You are a helpful assistant',
- });
+ ### Patch Changes
+
+ - Add time-to-first-token (TTFT) support for Langfuse integration ([#10781](https://github.com/mastra-ai/mastra/pull/10781))
+
+ Adds `completionStartTime` to model generation spans, which Langfuse uses to calculate TTFT metrics. The timestamp is automatically captured when the first content chunk arrives during streaming.
+
+ ```typescript
+ // completionStartTime is now automatically captured and sent to Langfuse
+ // enabling TTFT metrics in your Langfuse dashboard
+ const result = await agent.stream('Hello');
  ```

- - Fix tool suspension throwing error when `outputSchema` is passed to tool during creation ([#10444](https://github.com/mastra-ai/mastra/pull/10444))
- - Pass `suspendSchema` and `resumeSchema` from tool into step created when creating step from tool
+ - Updated OtelExporters, Bridge, and Arize packages to better implement GenAI v1.38.0 Otel Semantic Conventions. See: ([#10591](https://github.com/mastra-ai/mastra/pull/10591))
+ https://github.com/open-telemetry/semantic-conventions/blob/v1.38.0/docs/gen-ai/README.md

- - Add `onOutput` hook for tools ([#10466](https://github.com/mastra-ai/mastra/pull/10466))
+ - Standardize error IDs across all storage and vector stores using centralized helper functions (`createStorageErrorId` and `createVectorErrorId`). This ensures consistent error ID patterns (`MASTRA_STORAGE_{STORE}_{OPERATION}_{STATUS}` and `MASTRA_VECTOR_{STORE}_{OPERATION}_{STATUS}`) across the codebase for better error tracking and debugging. ([#10913](https://github.com/mastra-ai/mastra/pull/10913))
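As a rough illustration of the documented pattern (the `createStorageErrorId`/`createVectorErrorId` helpers are internal; the function below is a hypothetical stand-in, not Mastra's API):

```typescript
// Hypothetical stand-in mirroring the documented pattern
// MASTRA_STORAGE_{STORE}_{OPERATION}_{STATUS} / MASTRA_VECTOR_{STORE}_{OPERATION}_{STATUS}.
const storageErrorId = (store: string, operation: string, status: string): string =>
  `MASTRA_STORAGE_${store}_${operation}_${status}`;

console.log(storageErrorId('PG', 'LIST_MESSAGES', 'FAILED'));
// -> "MASTRA_STORAGE_PG_LIST_MESSAGES_FAILED"
```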

- Tools now support an `onOutput` lifecycle hook that is invoked after successful tool execution. This complements the existing `onInputStart`, `onInputDelta`, and `onInputAvailable` hooks to provide complete visibility into the tool execution lifecycle.
+ - fix: generate unique text IDs for Anthropic/Google providers ([#10740](https://github.com/mastra-ai/mastra/pull/10740))

- The `onOutput` hook receives:
- - `output`: The tool's return value (typed according to `outputSchema`)
- - `toolCallId`: Unique identifier for the tool call
- - `toolName`: The name of the tool that was executed
- - `abortSignal`: Signal for detecting if the operation should be cancelled
+ Workaround for duplicate text-start/text-end IDs in multi-step agentic flows.

- Example usage:
+ The `@ai-sdk/anthropic` and `@ai-sdk/google` providers use numeric indices ("0", "1", etc.) for text block IDs that reset for each LLM call. This caused duplicate IDs when an agent does TEXT → TOOL → TEXT, breaking message ordering and storage.

- ```typescript
- import { createTool } from '@mastra/core/tools';
- import { z } from 'zod';
-
- export const weatherTool = createTool({
- id: 'weather-tool',
- description: 'Get weather information',
- outputSchema: z.object({
- temperature: z.number(),
- conditions: z.string(),
- }),
- execute: async input => {
- return { temperature: 72, conditions: 'sunny' };
- },
- onOutput: ({ output, toolCallId, toolName }) => {
- console.log(`${toolName} completed:`, output);
- // output is fully typed based on outputSchema
- },
- });
- ```
+ The fix replaces numeric IDs with UUIDs, maintaining a map per step so text-start, text-delta, and text-end chunks for the same block share the same UUID. OpenAI's UUIDs pass through unchanged.
+
+ Related: #9909
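A minimal sketch of the remapping described above, assuming chunks carry the provider's per-step text block id (names here are illustrative, not the actual internals):

```typescript
import { randomUUID } from 'node:crypto';

// Map the provider's per-step numeric text block ids ("0", "1", ...) to stable UUIDs
// so text-start, text-delta, and text-end chunks for the same block share one id.
const idsForStep = new Map<string, string>();

function remapTextId(providerId: string): string {
  let id = idsForStep.get(providerId);
  if (!id) {
    id = randomUUID();
    idsForStep.set(providerId, id);
  }
  return id;
}

// When a new step begins, clear the map so the next "0" gets a fresh UUID.
// idsForStep.clear();
```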
+
+ - Fix sub-agent requestContext propagation in listAgentTools ([#10844](https://github.com/mastra-ai/mastra/pull/10844))
+
+ Sub-agents with dynamic model configurations were broken because `requestContext` was not being passed to `getModel()` when creating agent tools. This caused sub-agents using function-based model configurations to receive an empty context instead of the parent's context.

- Hook execution order:
- 1. `onInputStart` - Input streaming begins
- 2. `onInputDelta` - Input chunks arrive (called multiple times)
- 3. `onInputAvailable` - Complete input parsed and validated
- 4. Tool's `execute` function runs
- 5. `onOutput` - Tool completed successfully (NEW)
+ No code changes required for consumers - this fix restores expected behavior for dynamic model configurations in sub-agents.

- - Add new deleteVectors, updateVector by filter ([#10408](https://github.com/mastra-ai/mastra/pull/10408))
+ - Fix ToolStream type error when piping streams with different types ([#10845](https://github.com/mastra-ai/mastra/pull/10845))

- - Fix base64 encoded images with threads - issue #10480 ([#10483](https://github.com/mastra-ai/mastra/pull/10483))
+ Changes `ToolStream` to extend `WritableStream<unknown>` instead of `WritableStream<T>`. This fixes the TypeScript error when piping `objectStream` or `fullStream` to `writer` in workflow steps.

- Fixed "Invalid URL" error when using base64 encoded images (without `data:` prefix) in agent calls with threads and resources. Raw base64 strings are now automatically converted to proper data URIs before being processed.
+ Before:

- **Changes:**
- - Updated `attachments-to-parts.ts` to detect and convert raw base64 strings to data URIs
- - Fixed `MessageList` image processing to handle raw base64 in two locations:
- - Image part conversion in `aiV4CoreMessageToV1PromptMessage`
- - File part to experimental_attachments conversion in `mastraDBMessageToAIV4UIMessage`
- - Added comprehensive tests for base64 images, data URIs, and HTTP URLs with threads
+ ```typescript
+ // TypeError: ToolStream<ChunkType> is not assignable to WritableStream<Partial<StoryPlan>>
+ await response.objectStream.pipeTo(writer);
+ ```

- **Breaking Change:** None - this is a bug fix that maintains backward compatibility while adding support for raw base64 strings.
+ After:

- - Fix message metadata not persisting when using simple message format. Previously, custom metadata passed in messages (e.g., `{role: 'user', content: 'text', metadata: {userId: '123'}}`) was not being saved to the database. This occurred because the CoreMessage conversion path didn't preserve metadata fields. ([#10488](https://github.com/mastra-ai/mastra/pull/10488))
+ ```typescript
+ // Works without type errors
+ await response.objectStream.pipeTo(writer);
+ ```

- Now metadata is properly preserved for all message input formats:
- - Simple CoreMessage format: `{role, content, metadata}`
- - Full UIMessage format: `{role, content, parts, metadata}`
- - AI SDK v5 ModelMessage format with metadata
+ - feat: add native Perplexity provider support ([#10885](https://github.com/mastra-ai/mastra/pull/10885))

- Fixes #8556
+ - When sending the first message to a new thread with PostgresStore, users would get a "Thread not found" error. This happened because the thread was created in memory but not persisted to the database before the MessageHistory output processor tried to save messages. ([#10881](https://github.com/mastra-ai/mastra/pull/10881))

- - feat: Composite auth implementation ([#10359](https://github.com/mastra-ai/mastra/pull/10359))
+ **Before:**

- - Fix requireApproval property being ignored for tools passed via toolsets, clientTools, and memoryTools parameters. The requireApproval flag now correctly propagates through all tool conversion paths, ensuring tools requiring approval will properly request user approval before execution. ([#10464](https://github.com/mastra-ai/mastra/pull/10464))
+ ```ts
+ threadObject = await memory.createThread({
+ // ...
+ saveThread: false, // thread not in DB yet
+ });
+ // Later: MessageHistory calls saveMessages() -> PostgresStore throws "Thread not found"
+ ```

- - Add timeTravel APIs and add timeTravel feature to studio ([#10361](https://github.com/mastra-ai/mastra/pull/10361))
+ **After:**

- - Fix Azure Foundry rate limit handling for -1 values ([#10409](https://github.com/mastra-ai/mastra/pull/10409))
+ ```ts
+ threadObject = await memory.createThread({
+ // ...
+ saveThread: true, // thread persisted immediately
+ });
+ // MessageHistory can now save messages without error
+ ```

- - Fix model headers not being passed through gateway system ([#10465](https://github.com/mastra-ai/mastra/pull/10465))
+ - Emit error chunk and call onError when agent workflow step fails ([#10907](https://github.com/mastra-ai/mastra/pull/10907))

- Previously, custom headers specified in `MastraModelConfig` were not being passed through the gateway system to model providers. This affected:
- - OpenRouter (preventing activity tracking with `HTTP-Referer` and `X-Title`)
- - Custom providers using custom URLs (headers not passed to `createOpenAICompatible`)
- - Custom gateway implementations (headers not available in `resolveLanguageModel`)
+ When a workflow step fails (e.g., tool not found), the error is now properly emitted as an error chunk to the stream and the onError callback is called. This fixes the issue where agent.generate() would throw "promise 'text' was not resolved or rejected" instead of the actual error message.
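A hedged sketch of observing such a failure from the caller; exactly where `onError` is registered may differ, so treat the options shape below as an assumption:

```typescript
// Assumption: onError can be passed alongside the other stream options.
const stream = await agent.stream('Call the lookup tool', {
  onError: ({ error }: { error: unknown }) => {
    console.error('Step failed:', error);
  },
});

for await (const chunk of stream.fullStream) {
  if (chunk.type === 'error') {
    // The failure now also surfaces as an error chunk instead of a hanging promise.
    console.error('Received error chunk:', chunk);
  }
}
```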

- Now headers are correctly passed through the entire gateway system:
- - Base `MastraModelGateway` interface updated to accept headers
- - `ModelRouterLanguageModel` passes headers from config to all gateways
- - OpenRouter receives headers for activity tracking
- - Custom URL providers receive headers via `createOpenAICompatible`
- - Custom gateways can access headers in their `resolveLanguageModel` implementation
+ - fix(core): use agent description when converting agent to tool ([#10879](https://github.com/mastra-ai/mastra/pull/10879))

- Example usage:
+ - Adds native @ai-sdk/deepseek provider support instead of using the OpenAI-compatible fallback. ([#10822](https://github.com/mastra-ai/mastra/pull/10822))

  ```typescript
- // Works with OpenRouter
  const agent = new Agent({
- name: 'my-agent',
- instructions: 'You are a helpful assistant.',
- model: {
- id: 'openrouter/anthropic/claude-3-5-sonnet',
- headers: {
- 'HTTP-Referer': 'https://myapp.com',
- 'X-Title': 'My Application',
- },
- },
+ model: 'deepseek/deepseek-reasoner',
  });

- // Also works with custom providers
- const customAgent = new Agent({
- name: 'custom-agent',
- instructions: 'You are a helpful assistant.',
- model: {
- id: 'custom-provider/model',
- url: 'https://api.custom.com/v1',
- apiKey: 'key',
- headers: {
- 'X-Custom-Header': 'custom-value',
+ // With provider options for reasoning
+ const response = await agent.generate('Solve this problem', {
+ providerOptions: {
+ deepseek: {
+ thinking: { type: 'enabled' },
  },
  },
  });
  ```

- Fixes https://github.com/mastra-ai/mastra/issues/9760
-
- - fix(agent): persist messages before tool suspension ([#10369](https://github.com/mastra-ai/mastra/pull/10369))
+ Also updates the doc generation scripts so DeepSeek provider options show up in the generated docs.

- Fixes issues where thread and messages were not saved before suspension when tools require approval or call suspend() during execution. This caused conversation history to be lost if users refreshed during tool approval or suspension.
+ - Return state too if `includeState: true` is in `outputOptions` and workflow run is not successful ([#10806](https://github.com/mastra-ai/mastra/pull/10806))

- **Backend changes (@mastra/core):**
- - Add assistant messages to messageList immediately after LLM execution
- - Flush messages synchronously before suspension to persist state
- - Create thread if it doesn't exist before flushing
- - Add metadata helpers to persist and remove tool approval state
- - Pass saveQueueManager and memory context through workflow for immediate persistence
+ - feat: Add partial response support for agent and workflow list endpoints ([#10886](https://github.com/mastra-ai/mastra/pull/10886))

- **Frontend changes (@mastra/react):**
- - Extract runId from pending approvals to enable resumption after refresh
- - Convert `pendingToolApprovals` (DB format) to `requireApprovalMetadata` (runtime format)
- - Handle both `dynamic-tool` and `tool-{NAME}` part types for approval state
- - Change runId from hardcoded `agentId` to unique `uuid()`
+ Add optional `partial` query parameter to `/api/agents` and `/api/workflows` endpoints to return minimal data without schemas, reducing payload size for list views:
+ - When `partial=true`: tool schemas (inputSchema, outputSchema) are omitted
+ - When `partial=true`: workflow steps are replaced with stepCount integer
+ - When `partial=true`: workflow root schemas (inputSchema, outputSchema) are omitted
+ - Maintains backward compatibility when partial parameter is not provided

- **UI changes (@mastra/playground-ui):**
- - Handle tool calls awaiting approval in message initialization
- - Convert approval metadata format when loading initial messages
+ ## Server Endpoint Usage

- Fixes #9745, #9906
+ ```http
+ # Get partial agent data (no tool schemas)
+ GET /api/agents?partial=true

- - Update MockMemory to work with new storage API changes. MockMemory now properly implements all abstract MastraMemory methods. This includes proper thread management, message saving with MessageList conversion, working memory operations with scope support, and resource listing. ([#10368](https://github.com/mastra-ai/mastra/pull/10368))
+ # Get full agent data (default behavior)
+ GET /api/agents

- Add Zod v4 support for working memory schemas. Memory implementations now check for Zod v4's built-in `.toJsonSchema()` method before falling back to the `zodToJsonSchema` compatibility function, improving performance and forward compatibility while maintaining backward compatibility with Zod v3.
+ # Get partial workflow data (stepCount instead of steps, no schemas)
+ GET /api/workflows?partial=true

- Add Gemini 3 Pro test coverage in agent-gemini.test.ts to validate the latest Gemini model integration.
+ # Get full workflow data (default behavior)
+ GET /api/workflows
+ ```

- - Fix race condition in parallel tool stream writes ([#10463](https://github.com/mastra-ai/mastra/pull/10463))
+ ## Client SDK Usage

- Introduces a write queue to ToolStream to serialize access to the underlying stream, preventing writer locked errors
+ ```typescript
+ import { MastraClient } from '@mastra/client-js';

- - Remove unneeded console warning when flushing messages and no threadId or saveQueueManager is found. ([#10498](https://github.com/mastra-ai/mastra/pull/10498))
+ const client = new MastraClient({ baseUrl: 'http://localhost:4111' });

- - Add optional includeRawChunks parameter to agent execution options, ([#10456](https://github.com/mastra-ai/mastra/pull/10456))
- allowing users to include raw chunks in stream output where supported
- by the model provider.
+ // Get partial agent list (smaller payload)
+ const partialAgents = await client.listAgents({ partial: true });

- - When `mastra dev` runs, multiple processes can write to `provider-registry.json` concurrently (auto-refresh, syncGateways, syncGlobalCacheToLocal). This causes file corruption where the end of the JSON appears twice, making it unparseable. ([#10455](https://github.com/mastra-ai/mastra/pull/10455))
+ // Get full agent list with tool schemas
+ const fullAgents = await client.listAgents();

- The fix uses atomic writes via the write-to-temp-then-rename pattern. Instead of:
+ // Get partial workflow list (smaller payload)
+ const partialWorkflows = await client.listWorkflows({ partial: true });

- ```ts
- fs.writeFileSync(filePath, content, 'utf-8');
+ // Get full workflow list with steps and schemas
+ const fullWorkflows = await client.listWorkflows();
  ```

- We now do:
+ - Fix processInputStep so it runs correctly. ([#10909](https://github.com/mastra-ai/mastra/pull/10909))

- ```ts
- const tempPath = `${filePath}.${process.pid}.${Date.now()}.${randomSuffix}.tmp`;
- fs.writeFileSync(tempPath, content, 'utf-8');
- fs.renameSync(tempPath, filePath); // atomic on POSIX
- ```
+ - Updated dependencies [[`6c59a40`](https://github.com/mastra-ai/mastra/commit/6c59a40e0ad160467bd13d63a8a287028d75b02d), [`3076c67`](https://github.com/mastra-ai/mastra/commit/3076c6778b18988ae7d5c4c5c466366974b2d63f), [`0bada2f`](https://github.com/mastra-ai/mastra/commit/0bada2f2c1234932cf30c1c47a719ffb64b801c5), [`cc60ff6`](https://github.com/mastra-ai/mastra/commit/cc60ff616541a3b0fb531a7e469bf9ae7bb90528)]:
+ - @mastra/observability@1.0.0-beta.3

- `fs.rename()` is atomic on POSIX systems when both paths are on the same filesystem, so concurrent writes will each complete fully rather than interleaving.
+ ## 1.0.0-beta.6

- - Fix .map when placed at the beginning of a workflow or nested workflow ([#10457](https://github.com/mastra-ai/mastra/pull/10457))
+ ### Major Changes

- - Ensures that data chunks written via `writer.custom()` always bubble up directly to the top-level stream, even when nested in sub-agents. This allows tools to emit custom progress updates, metrics, and other data that can be consumed at any level of the agent hierarchy. ([#10309](https://github.com/mastra-ai/mastra/pull/10309))
- - **Added bubbling logic in sub-agent execution**: When sub-agents execute, data chunks (chunks with type starting with `data-`) are detected and written via `writer.custom()` instead of `writer.write()`, ensuring they bubble up directly without being wrapped in `tool-output` chunks.
- - **Added comprehensive tests**:
- - Test for `writer.custom()` with direct tool execution
- - Test for `writer.custom()` with sub-agent tools (nested execution)
- - Test for mixed usage of `writer.write()` and `writer.custom()` in the same tool
+ - Changed `.branch()` result schema to make all branch output fields optional. ([#10693](https://github.com/mastra-ai/mastra/pull/10693))

- When a sub-agent's tool uses `writer.custom()` to write data chunks, those chunks appear in the sub-agent's stream. The parent agent's execution logic now detects these chunks and uses `writer.custom()` to bubble them up directly, preserving their structure and making them accessible at the top level.
+ **Breaking change**: Branch outputs are now optional since only one branch executes at runtime. Update your workflow schemas to handle optional branch results.

- This ensures that:
- - Data chunks from tools always appear directly in the stream (not wrapped)
- - Data chunks bubble up correctly through nested agent hierarchies
- - Regular chunks continue to be wrapped in `tool-output` as expected
+ **Before:**

- - Update agent workflow and sub-agent tool transformations to accept more input arguments. ([#10278](https://github.com/mastra-ai/mastra/pull/10278))
+ ```typescript
+ const workflow = createWorkflow({...})
+ .branch([
+ [condition1, stepA], // outputSchema: { result: z.string() }
+ [condition2, stepB], // outputSchema: { data: z.number() }
+ ])
+ .map({
+ finalResult: { step: stepA, path: 'result' } // Expected non-optional
+ });
+ ```

- These tools now accept the following
+ **After:**

+ ```typescript
+ const workflow = createWorkflow({...})
+ .branch([
+ [condition1, stepA],
+ [condition2, stepB],
+ ])
+ .map({
+ finalResult: {
+ step: stepA,
+ path: 'result' // Now optional - provide fallback
+ }
+ });
  ```
- workflowTool.execute({ inputData, initialState }, context)

- agentTool.execute({ prompt, threadId, resourceId, instructions, maxSteps }, context)
- ```
+ **Why**: Branch conditionals execute only one path, so non-executed branches don't produce outputs. The type system now correctly reflects this runtime behavior.

- Workflow tools now also properly return errors when the workflow run fails
+ Related issue: https://github.com/mastra-ai/mastra/issues/10642

- ```
- const workflowResult = await workflowTool.execute({ inputData, initialState }, context)
+ ### Minor Changes

- console.log(workflowResult.error) // error msg if error
- console.log(workflowResult.result) // result of the workflow if success
- ```
+ - Memory system now uses processors. Memory processors (`MessageHistory`, `SemanticRecall`, `WorkingMemory`) are now exported from `@mastra/memory/processors` and automatically added to the agent pipeline based on your memory config. Core processors (`ToolCallFilter`, `TokenLimiter`) remain in `@mastra/core/processors`. ([#9254](https://github.com/mastra-ai/mastra/pull/9254))
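For reference, the new import split looks roughly like this (paths are taken from the entry above; explicit registration is usually unnecessary since the processors are added automatically from your memory config):

```typescript
// Memory-backed processors now live in @mastra/memory/processors
import { MessageHistory, SemanticRecall, WorkingMemory } from '@mastra/memory/processors';

// Core processors stay in @mastra/core/processors
import { ToolCallFilter, TokenLimiter } from '@mastra/core/processors';
```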

- Workflows passed to agents do not properly handle suspend/resume`, they only handle success or error.
+ - Add reserved keys in RequestContext for secure resourceId/threadId setting from middleware ([#10657](https://github.com/mastra-ai/mastra/pull/10657))

- - Fixed OpenAI schema compatibility when using `agent.generate()` or `agent.stream()` with `structuredOutput`. ([#10366](https://github.com/mastra-ai/mastra/pull/10366))
+ This allows middleware to securely set `resourceId` and `threadId` via reserved keys in RequestContext (`MASTRA_RESOURCE_ID_KEY` and `MASTRA_THREAD_ID_KEY`), which take precedence over client-provided values for security.
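A heavily hedged sketch of the middleware side; the constant names come from the entry above, but the import path and the `set()` call are assumptions about the RequestContext API:

```typescript
// Assumption: the reserved key constants and RequestContext type are exported from @mastra/core.
import { MASTRA_RESOURCE_ID_KEY, MASTRA_THREAD_ID_KEY, type RequestContext } from '@mastra/core';

// Called from authenticated server middleware so these values win over anything the client sends.
function pinIdentity(requestContext: RequestContext, userId: string, threadId: string) {
  requestContext.set(MASTRA_RESOURCE_ID_KEY, userId);
  requestContext.set(MASTRA_THREAD_ID_KEY, threadId);
}
```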
272
226
 
273
- ## Changes
274
- - **Automatic transformation**: Zod schemas are now automatically transformed for OpenAI strict mode compatibility when using OpenAI models (including reasoning models like o1, o3, o4)
275
- - **Optional field handling**: `.optional()` fields are converted to `.nullable()` with a transform that converts `null` → `undefined`, preserving optional semantics while satisfying OpenAI's strict mode requirements
276
- - **Preserves nullable fields**: Intentionally `.nullable()` fields remain unchanged
277
- - **Deep transformation**: Handles `.optional()` fields at any nesting level (objects, arrays, unions, etc.)
278
- - **JSON Schema objects**: Not transformed, only Zod schemas
227
+ - feat(workflows): add suspendData parameter to step execute function ([#10734](https://github.com/mastra-ai/mastra/pull/10734))
279
228
 
280
- ## Example
229
+ Adds a new `suspendData` parameter to workflow step execute functions that provides access to the data originally passed to `suspend()` when the step was suspended. This enables steps to access context about why they were suspended when they are later resumed.
230
+
231
+ **New Features:**
232
+ - `suspendData` parameter automatically populated in step execute function when resuming
233
+ - Type-safe access to suspend data matching the step's `suspendSchema`
234
+ - Backward compatible - existing workflows continue to work unchanged
235
+
236
+ **Example:**
281
237
 
282
238
  ```typescript
283
- const agent = new Agent({
284
- name: 'data-extractor',
285
- model: { provider: 'openai', modelId: 'gpt-4o' },
286
- instructions: 'Extract user information',
239
+ const step = createStep({
240
+ suspendSchema: z.object({ reason: z.string() }),
241
+ resumeSchema: z.object({ approved: z.boolean() }),
242
+ execute: async ({ suspend, suspendData, resumeData }) => {
243
+ if (!resumeData?.approved) {
244
+ return await suspend({ reason: 'Approval required' });
245
+ }
246
+
247
+ // Access original suspend data when resuming
248
+ console.log(`Resuming after: ${suspendData?.reason}`);
249
+ return { result: 'Approved' };
250
+ },
287
251
  });
252
+ ```
288
253
 
289
- const schema = z.object({
290
- name: z.string(),
291
- age: z.number().optional(),
292
- deletedAt: z.date().nullable(),
293
- });
254
+ - feat(storage): support querying messages from multiple threads ([#10663](https://github.com/mastra-ai/mastra/pull/10663))
255
+ - Fixed TypeScript errors where `threadId: string | string[]` was being passed to places expecting `Scalar` type
256
+ - Added proper multi-thread support for `listMessages` across all adapters when `threadId` is an array
257
+ - Updated `_getIncludedMessages` to look up message threadId by ID (since message IDs are globally unique)
258
+ - **upstash**: Added `msg-idx:{messageId}` index for O(1) message lookups (backwards compatible with fallback to scan for old messages, with automatic backfill)
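For illustration, a multi-thread query might look like the following; the exact filter object varies per adapter, so this shape is an assumption:

```typescript
// Assumption: each storage adapter's listMessages accepts a threadId filter that may be an array.
const messages = await storage.listMessages({
  threadId: ['thread-1', 'thread-2'],
});
```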

- // Schema is automatically transformed for OpenAI compatibility
- const result = await agent.generate('Extract: John, deleted yesterday', {
- structuredOutput: { schema },
- });
+ - Adds trace tagging support to the BrainTrust and Langfuse tracing exporters. ([#10765](https://github.com/mastra-ai/mastra/pull/10765))

- // Result: { name: 'John', age: undefined, deletedAt: null }
- ```
+ - Add `messageList` parameter to `processOutputStream` for accessing remembered messages during streaming ([#10608](https://github.com/mastra-ai/mastra/pull/10608))

- - Fix network data step formatting in AI SDK stream transformation ([#10432](https://github.com/mastra-ai/mastra/pull/10432))
+ - Unify transformScoreRow functions across storage adapters ([#10648](https://github.com/mastra-ai/mastra/pull/10648))

- Previously, network execution steps were not being tracked correctly in the AI SDK stream transformation. Steps were being duplicated rather than updated, and critical metadata like step IDs, iterations, and task information was missing or incorrectly structured.
+ Added a unified `transformScoreRow` function in `@mastra/core/storage` that provides schema-driven row transformation for score data. This eliminates code duplication across 10 storage adapters while maintaining store-specific behavior through configurable options:
+ - `preferredTimestampFields`: Preferred source fields for timestamps (PostgreSQL, Cloudflare D1)
+ - `convertTimestamps`: Convert timestamp strings to Date objects (MSSQL, MongoDB, ClickHouse)
+ - `nullValuePattern`: Skip values matching pattern (ClickHouse's `'_null_'`)
+ - `fieldMappings`: Map source column names to schema fields (LibSQL's `additionalLLMContext`)

- **Changes:**
- - Enhanced step tracking in `AgentNetworkToAISDKTransformer` to properly maintain step state throughout execution lifecycle
- - Steps are now identified by unique IDs and updated in place rather than creating duplicates
- - Added proper iteration and task metadata to each step in the network execution flow
- - Fixed agent, workflow, and tool execution events to correctly populate step data
- - Updated network stream event types to include `networkId`, `workflowId`, and consistent `runId` tracking
- - Added test coverage for network custom data chunks with comprehensive validation
+ Each store adapter now uses the unified function with appropriate options, reducing ~200 lines of duplicate transformation logic while ensuring consistent behavior across all storage backends.
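A hypothetical adapter-side call using the option names listed above (the real signature and export path may differ):

```typescript
import { transformScoreRow } from '@mastra/core/storage'; // assumption: exported from here

declare const row: Record<string, unknown>; // a raw score row as returned by the database driver

// Example: a ClickHouse-style adapter converting timestamp strings and skipping its '_null_' sentinel.
const score = transformScoreRow(row, {
  convertTimestamps: true,
  nullValuePattern: '_null_',
});
```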
314
273
 
315
- This ensures the AI SDK correctly represents the full execution flow of agent networks with accurate step sequencing and metadata.
274
+ ### Patch Changes
316
275
 
317
- - Fix generating provider-registry.json ([#10392](https://github.com/mastra-ai/mastra/pull/10392))
276
+ - dependencies updates: ([#10110](https://github.com/mastra-ai/mastra/pull/10110))
277
+ - Updated dependency [`hono-openapi@^1.1.1` ↗︎](https://www.npmjs.com/package/hono-openapi/v/1.1.1) (from `^0.4.8`, in `dependencies`)
318
278
 
319
- - Adds type inference for `mastra.get*ById` functions. Only those registered at the top level mastra instance will get inferred. MCP and tool id's do not get inferred yet, those need additional changes. ([#10199](https://github.com/mastra-ai/mastra/pull/10199))
279
+ - unexpected json parse issue, log error but dont fail ([#10241](https://github.com/mastra-ai/mastra/pull/10241))
320
280
 
321
- - Fix working memory zod to json schema conversion to use schema-compat zodtoJsonSchema fn. ([#10391](https://github.com/mastra-ai/mastra/pull/10391))
281
+ - Fixed a bug in agent networks where sometimes the task name was empty ([#10629](https://github.com/mastra-ai/mastra/pull/10629))
322
282
 
323
- - Fixes parallel tool call issue with Gemini 3 Pro by preventing step-start parts from being inserted between consecutive tool parts in the `addStartStepPartsForAIV5` function. This ensures that the AI SDK's `convertToModelMessages` correctly preserves the order of parallel tool calls and maintains the `thought_signature` on the first tool call as required by Gemini's API. ([#10372](https://github.com/mastra-ai/mastra/pull/10372))
283
+ - Adds `tool-result` and `tool-error` chunks to the processor.processOutputStream path. Processors now have access to these two chunks. ([#10645](https://github.com/mastra-ai/mastra/pull/10645))
324
284
 
325
- - Updated dependencies [[`bae33d9`](https://github.com/mastra-ai/mastra/commit/bae33d91a63fbb64d1e80519e1fc1acaed1e9013)]:
326
- - @mastra/schema-compat@1.0.0-beta.1
285
+ - Include `.input` in workflow results for both engines and remove the option to omit them from Inngest workflows. ([#10688](https://github.com/mastra-ai/mastra/pull/10688))
327
286
 
328
- ## 1.0.0-beta.4
287
+ - `getSpeakers` endpoint returns an empty array if voice is not configured on the agent and `getListeners` endpoint returns `{ enabled: false }` if voice is not figured on the agent. ([#10560](https://github.com/mastra-ai/mastra/pull/10560))
329
288
 
330
- ### Patch Changes
289
+ When no voice is set on agent don't throw error, by default set voice to undefined rather than DefaultVoice which throws errors when it is accessed.
331
290
 
332
- - Fix message list provider metadata handling and reasoning text optimization ([#10281](https://github.com/mastra-ai/mastra/pull/10281))
333
- - Improved provider metadata preservation across message transformations
334
- - Optimized reasoning text storage to avoid duplication (using `details` instead of `reasoning` field)
335
- - Fixed test snapshots for timestamp precision and metadata handling
291
+ - SimpleAuth and improved CloudAuth ([#10490](https://github.com/mastra-ai/mastra/pull/10490))
336
292
 
337
- - Allow provider to pass through options to the auth config ([#10284](https://github.com/mastra-ai/mastra/pull/10284))
293
+ - When LLMs like Claude Sonnet 4.5 and Gemini 2.4 call tools with all-optional parameters, they send `args: undefined` instead of `args: {}`. This caused validation to fail with "root: Required". ([#10728](https://github.com/mastra-ai/mastra/pull/10728))
338
294
 
339
- - Fix deprecation warning when agent network executes workflows by using `.fullStream` instead of iterating `WorkflowRunOutput` directly ([#10285](https://github.com/mastra-ai/mastra/pull/10285))
295
+ The fix normalizes `undefined`/`null` to `{}` for object schemas and `[]` for array schemas before validation.
340
296
 
341
- - Fix generate toolResults and mismatch in provider tool names ([#10282](https://github.com/mastra-ai/mastra/pull/10282))
297
+ - Fixed tool validation error messages so logs show Zod validation errors directly instead of hiding them inside structured JSON. ([#10579](https://github.com/mastra-ai/mastra/pull/10579))
342
298
 
343
- - Support AI SDK voice models ([#10304](https://github.com/mastra-ai/mastra/pull/10304))
299
+ - Fix error when spreading config objects in Mastra constructor ([#10718](https://github.com/mastra-ai/mastra/pull/10718))
344
300
 
345
- Mastra now supports AI SDK's transcription and speech models directly in `CompositeVoice`, enabling seamless integration with a wide range of voice providers through the AI SDK ecosystem. This allows you to use models from OpenAI, ElevenLabs, Groq, Deepgram, LMNT, Hume, and many more for both speech-to-text (transcription) and text-to-speech capabilities.
301
+ Adds validation guards to handle undefined/null values that can occur when config objects are spread (`{ ...config }`). Previously, if getters or non-enumerable properties resulted in undefined values during spread, the constructor would throw cryptic errors when accessing `.id` or `.name` on undefined objects.
346
302
 
347
- AI SDK models are automatically wrapped when passed to `CompositeVoice`, so you can mix and match AI SDK models with existing Mastra voice providers for maximum flexibility.
303
+ - Fix GPT-5/o3 reasoning models failing with "required reasoning item" errors when using memory with tools. Empty reasoning is now stored with providerMetadata to preserve OpenAI's item_reference. ([#10585](https://github.com/mastra-ai/mastra/pull/10585))
348
304
 
349
- ## Usage Example
305
+ - Fix generateTitle model type to accept AI SDK LanguageModelV2 ([#10541](https://github.com/mastra-ai/mastra/pull/10541))
350
306
 
351
- ```typescript
352
- import { CompositeVoice } from '@mastra/core/voice';
353
- import { openai } from '@ai-sdk/openai';
354
- import { elevenlabs } from '@ai-sdk/elevenlabs';
355
-
356
- // Use AI SDK models directly with CompositeVoice
357
- const voice = new CompositeVoice({
358
- input: openai.transcription('whisper-1'), // AI SDK transcription model
359
- output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK speech model
360
- });
307
+ Updated the `generateTitle.model` config option to accept `MastraModelConfig` instead of `MastraLanguageModel`. This allows users to pass raw AI SDK `LanguageModelV2` models (e.g., `anthropic.languageModel('claude-3-5-haiku-20241022')`) directly without type errors.
308
+
309
+ Previously, passing a standard `LanguageModelV2` would fail because `MastraLanguageModelV2` has different `doGenerate`/`doStream` return types. Now `MastraModelConfig` is used consistently across:
310
+ - `memory/types.ts` - `generateTitle.model` config
311
+ - `agent.ts` - `genTitle`, `generateTitleFromUserMessage`, `resolveTitleGenerationConfig`
312
+ - `agent-legacy.ts` - `AgentLegacyCapabilities` interface
313
+
314
+ - Fix message ordering when using toAISdkV5Messages or prepareStep ([#10686](https://github.com/mastra-ai/mastra/pull/10686))
361
315
 
362
- // Convert text to speech
363
- const audioStream = await voice.speak('Hello from AI SDK!');
316
+ Messages without `createdAt` timestamps were getting shuffled because they all received identical timestamps during conversion. Now messages are assigned monotonically increasing timestamps via `generateCreatedAt()`, preserving input order.
364
317
 
365
- // Convert speech to text
366
- const transcript = await voice.listen(audioStream);
367
- console.log(transcript);
318
+ Before:
319
+
320
+ ```
321
+ Input: [user: "hello", assistant: "Hi!", user: "bye"]
322
+ Output: [user: "bye", assistant: "Hi!", user: "hello"] // shuffled!
368
323
  ```
369
324
 
370
- Fixes #9947
325
+ After:
371
326
 
372
- ## 1.0.0-beta.3
327
+ ```
328
+ Input: [user: "hello", assistant: "Hi!", user: "bye"]
329
+ Output: [user: "hello", assistant: "Hi!", user: "bye"] // correct order
330
+ ```
373
331
 
374
- ### Major Changes
332
+ - Fix Scorer not using custom gateways registered with Mastra ([#10778](https://github.com/mastra-ai/mastra/pull/10778))
375
333
 
376
- - Use tool's outputSchema to validate results and return an error object if schema does not match output results. ([#9664](https://github.com/mastra-ai/mastra/pull/9664))
334
+ Scorers now have access to custom gateways when resolving models. Previously, calling `resolveModelConfig` in the scorer didn't pass the Mastra instance, so custom gateways were never available.
335
+
336
+ - Fix workflow run status not being updated from storage snapshot in createRun ([#10664](https://github.com/mastra-ai/mastra/pull/10664))
337
+
338
+ When createRun is called with an existing runId, it now correctly updates the run's status from the storage snapshot. This fixes the issue where different workflow instances (e.g., different API requests) would get a run with 'pending' status instead of the correct status from storage (e.g., 'suspended').
339
+
340
+ - Pass resourceId and threadId to network agent's subAgent when it has its own memory ([#10592](https://github.com/mastra-ai/mastra/pull/10592))
341
+
342
+ - use `agent.getMemory` to fetch the memory instance on the Agent class to make sure that storage gets set if memory doesn't set it itself. ([#10556](https://github.com/mastra-ai/mastra/pull/10556))
343
+
344
+ - Built-in processors that use internal agents (PromptInjectionDetector, ModerationProcessor, PIIDetector, LanguageDetector, StructuredOutputProcessor) now accept `providerOptions` to control model behavior. ([#10651](https://github.com/mastra-ai/mastra/pull/10651))
345
+
346
+ This lets you pass provider-specific settings like `reasoningEffort` for OpenAI thinking models:
377
347
 
378
348
  ```typescript
379
- const getUserTool = createTool({
380
- id: 'get-user',
381
- outputSchema: z.object({
382
- id: z.string(),
383
- name: z.string(),
384
- email: z.string().email(),
385
- }),
386
- execute: async inputData => {
387
- return { id: '123', name: 'John' };
349
+ const processor = new PromptInjectionDetector({
350
+ model: 'openai/o1-mini',
351
+ threshold: 0.7,
352
+ strategy: 'block',
353
+ providerOptions: {
354
+ openai: {
355
+ reasoningEffort: 'low',
356
+ },
388
357
  },
389
358
  });
390
359
  ```
391
360
 
392
- When validation fails, the tool returns a `ValidationError`:
361
+ - Improved typing for `workflow.then` to allow the provided steps `inputSchema` to be a subset of the previous steps `outputSchema`. Also errors if the provided steps `inputSchema` is a superset of the previous steps outputSchema. ([#10763](https://github.com/mastra-ai/mastra/pull/10763))
393
362
 
394
- ```typescript
395
- // Before v1 - invalid output would silently pass through
396
- await getUserTool.execute({});
397
- // { id: "123", name: "John" } - missing email
398
-
399
- // After v1 - validation error is returned
400
- await getUserTool.execute({});
401
- // {
402
- // error: true,
403
- // message: "Tool output validation failed for get-user. The tool returned invalid output:\n- email: Required\n\nReturned output: {...}",
404
- // validationErrors: { ... }
405
- // }
363
+ - Fix type issue with workflow `.parallel()` when passing multiple steps, one or more of which has a `resumeSchema` provided. ([#10708](https://github.com/mastra-ai/mastra/pull/10708))
364
+
365
+ - Adds bidirectional integration with otel tracing via a new @mastra/otel-bridge package. ([#10482](https://github.com/mastra-ai/mastra/pull/10482))
366
+
367
+ - Adds `processInputStep` method to the Processor interface. Unlike `processInput` which runs once at the start, this runs at each step of the agentic loop (including tool call continuations). ([#10650](https://github.com/mastra-ai/mastra/pull/10650))
368
+
369
+ ```ts
370
+ const processor: Processor = {
371
+ id: 'my-processor',
372
+ processInputStep: async ({ messages, messageList, stepNumber, systemMessages }) => {
373
+ // Transform messages at each step before LLM call
374
+ return messageList;
375
+ },
376
+ };
406
377
  ```
407
378
 
408
- ### Patch Changes
379
+ - When using output processors with `agent.generate()`, `result.text` was returning the unprocessed LLM response instead of the processed text. ([#10735](https://github.com/mastra-ai/mastra/pull/10735))
380
+
381
+ **Before:**
382
+
383
+ ```ts
384
+ const result = await agent.generate('hello');
385
+ result.text; // "hello world" (unprocessed)
386
+ result.response.messages[0].content[0].text; // "HELLO WORLD" (processed)
387
+ ```
388
+
389
+ **After:**
390
+
391
+ ```ts
392
+ const result = await agent.generate('hello');
393
+ result.text; // "HELLO WORLD" (processed)
394
+ ```
409
395
 
410
- - dependencies updates: ([#10131](https://github.com/mastra-ai/mastra/pull/10131))
411
- - Updated dependency [`hono@^4.10.5` ↗︎](https://www.npmjs.com/package/hono/v/4.10.5) (from `^4.9.7`, in `dependencies`)
396
+ The bug was caused by the `text` delayed promise being resolved twice - first correctly with the processed text, then overwritten with the unprocessed buffered text.
412
397
 
413
- - Only handle download image asset transformation if needed ([#10122](https://github.com/mastra-ai/mastra/pull/10122))
398
+ - Refactored default engine to fit durable execution better, and the inngest engine to match. ([#10627](https://github.com/mastra-ai/mastra/pull/10627))
399
+ Also fixes requestContext persistence by relying on inngest step memoization.
414
400
 
415
- - Add serializedStepGraph to runExecutionResult response ([#10004](https://github.com/mastra-ai/mastra/pull/10004))
401
+ Unifies some of the stepResults and error formats in both engines.
416
402
 
417
- - Fix tool outputSchema validation to allow unsupported Zod types like ZodTuple. The outputSchema is only used for internal validation and never sent to the LLM, so model compatibility checks are not needed. ([#9409](https://github.com/mastra-ai/mastra/pull/9409))
403
+ - Allow direct access to the server app handle from the Mastra instance. ([#10598](https://github.com/mastra-ai/mastra/pull/10598))
 
- - Fix vector definition to fix pinecone ([#10150](https://github.com/mastra-ai/mastra/pull/10150))
+   ```ts
+   // Before: HTTP request to localhost
+   const response = await fetch(`http://localhost:5000/api/tools`);
+
+   // After: Direct call via app.fetch()
+   const app = mastra.getServerApp<Hono>();
+   const response = await app.fetch(new Request('http://internal/api/tools'));
+   ```
+
+   - Added `mastra.getServerApp<T>()` to access the underlying Hono/Express app
+   - Added `mastra.getMastraServer()` and `mastra.setMastraServer()` for adapter access
+   - Added `MastraServerBase` class in `@mastra/core/server` for adapter implementations
+   - Server adapters now auto-register with Mastra in their constructor
+
+ - Fix network agent not receiving `text-delta` chunks from a subAgent when `.stream` is used ([#10533](https://github.com/mastra-ai/mastra/pull/10533))
+
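As a rough illustration of what this fix restores, here is a hedged sketch of watching for `text-delta` chunks on a streamed run. The agent variable and the exact chunk payload shape are assumptions, not taken from the changelog.

```ts
// `networkAgent` is assumed to be an existing Agent configured with sub-agents.
const stream = await networkAgent.stream('Summarize the latest reports');

for await (const chunk of stream.fullStream) {
  if (chunk.type === 'text-delta') {
    // With the fix, sub-agent text arrives incrementally instead of being dropped.
    const text = (chunk as any).payload?.text ?? '';
    process.stdout.write(text);
  }
}
```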
+ - Fix discriminatedUnion schema information being lost when a JSON schema is converted to Zod ([#10500](https://github.com/mastra-ai/mastra/pull/10500))
 
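For readers unfamiliar with the term, a discriminated union schema looks like the following. This is plain Zod and only illustrates the discriminator information the JSON Schema → Zod conversion now preserves; the field names are made up.

```ts
import { z } from 'zod';

// The `kind` field is the discriminator; round-tripping through JSON Schema
// previously lost the fact that this is a discriminated union.
const eventSchema = z.discriminatedUnion('kind', [
  z.object({ kind: z.literal('click'), x: z.number(), y: z.number() }),
  z.object({ kind: z.literal('keypress'), key: z.string() }),
]);
```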
- - fix resumeStream type to use resumeSchema ([#10202](https://github.com/mastra-ai/mastra/pull/10202))
+ - Fix `writer.custom` not working during workflow resume operations ([#10720](https://github.com/mastra-ai/mastra/pull/10720))
 
- - Add type bailed to workflowRunStatus ([#10091](https://github.com/mastra-ai/mastra/pull/10091))
+   When a workflow step is resumed, the writer parameter was not being properly passed through, causing `writer.custom()` calls to fail. This fix ensures the `writableStream` parameter is correctly passed to both `run.resume()` and `run.start()` calls in the workflow execution engine, allowing custom events to be emitted properly during resume operations.
 
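A hedged sketch of the scenario, assuming the usual `createStep` and run APIs; the step id, event name, and payload are illustrative and the exact `writer.custom()` signature is an assumption.

```ts
import { createStep } from '@mastra/core/workflows';
import { z } from 'zod';

// Hypothetical step that emits a custom event and can suspend/resume.
const notifyStep = createStep({
  id: 'notify',
  inputSchema: z.object({ orderId: z.string() }),
  resumeSchema: z.object({ confirmed: z.boolean() }),
  outputSchema: z.object({ done: z.boolean() }),
  execute: async ({ resumeData, suspend, writer }) => {
    // Previously this could fail after a resume because the writable stream
    // was not handed back to the step; with the fix the event is emitted.
    await writer?.custom({ type: 'order-status', status: resumeData ? 'confirmed' : 'pending' });
    if (!resumeData) {
      await suspend({});
      return { done: false };
    }
    return { done: true };
  },
});

// Resuming the suspended run (workflow setup omitted) should still deliver the custom events:
// const run = await orderWorkflow.createRunAsync();
// await run.start({ inputData: { orderId: 'o-1' } });
// await run.resume({ step: 'notify', resumeData: { confirmed: true } });
```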
- - default validate inputs to true in Workflow execute ([#10222](https://github.com/mastra-ai/mastra/pull/10222))
+ - Fix corrupted provider-registry.json file in global cache and regenerate corrupted files ([#10606](https://github.com/mastra-ai/mastra/pull/10606))
 
- - Add support for doGenerate in LanguageModelV2. This change fixes issues with OpenAI stream permissions. ([#10239](https://github.com/mastra-ai/mastra/pull/10239))
-   - Added new abstraction over LanguageModelV2
+ - Fix TypeScript error when using Zod schemas in `defaultOptions.structuredOutput` ([#10710](https://github.com/mastra-ai/mastra/pull/10710))
 
- - Fix input tool validation when no inputSchema is provided ([#9941](https://github.com/mastra-ai/mastra/pull/9941))
+   Previously, defining `structuredOutput.schema` in `defaultOptions` would cause a TypeScript error because the type only accepted `undefined`. Now any valid `OutputSchema` is correctly accepted.
 
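A hedged sketch of the configuration that previously failed to type-check; the agent definition, model string, and schema are illustrative and assume `defaultOptions` accepts the same `structuredOutput` shape as `generate()`.

```ts
import { Agent } from '@mastra/core/agent';
import { z } from 'zod';

// Declaring structuredOutput.schema up front used to be rejected by the types;
// it now type-checks with any valid OutputSchema.
const summarizer = new Agent({
  name: 'summarizer',
  instructions: 'Summarize the input as structured data.',
  model: 'openai/gpt-4o-mini',
  defaultOptions: {
    structuredOutput: {
      schema: z.object({
        title: z.string(),
        bulletPoints: z.array(z.string()),
      }),
    },
  },
});
```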
- - Adds ability to create custom `MastraModelGateway`'s that can be added to the `Mastra` class instance under the `gateways` property. Giving you typescript autocompletion in any model picker string. ([#10180](https://github.com/mastra-ai/mastra/pull/10180))
+ - Add support for `providerOptions` when defining tools. This allows developers to specify provider-specific configurations (like Anthropic's `cacheControl`) per tool. ([#10649](https://github.com/mastra-ai/mastra/pull/10649))
 
    ```typescript
-   import { MastraModelGateway, type ProviderConfig } from '@mastra/core/llm';
-   import { createOpenAICompatible } from '@ai-sdk/openai-compatible-v5';
-   import type { LanguageModelV2 } from '@ai-sdk/provider-v5';
-
-   class MyCustomGateway extends MastraModelGateway {
-     readonly id = 'my-custom-gateway';
-     readonly name = 'My Custom Gateway';
-     readonly prefix = 'custom';
-
-     async fetchProviders(): Promise<Record<string, ProviderConfig>> {
-       return {
-         'my-provider': {
-           name: 'My Provider',
-           models: ['model-1', 'model-2'],
-           apiKeyEnvVar: 'MY_API_KEY',
-           gateway: this.id,
-         },
-       };
-     }
-
-     buildUrl(modelId: string, envVars?: Record<string, string>): string {
-       return 'https://api.my-provider.com/v1';
-     }
-
-     async getApiKey(modelId: string): Promise<string> {
-       const apiKey = process.env.MY_API_KEY;
-       if (!apiKey) throw new Error('MY_API_KEY not set');
-       return apiKey;
-     }
-
-     async resolveLanguageModel({
-       modelId,
-       providerId,
-       apiKey,
-     }: {
-       modelId: string;
-       providerId: string;
-       apiKey: string;
-     }): Promise<LanguageModelV2> {
-       const baseURL = this.buildUrl(`${providerId}/${modelId}`);
-       return createOpenAICompatible({
-         name: providerId,
-         apiKey,
-         baseURL,
-       }).chatModel(modelId);
-     }
-   }
-
-   new Mastra({
-     gateways: {
-       myGateway: new MyCustomGateway(),
+   createTool({
+     id: 'my-tool',
+     providerOptions: {
+       anthropic: { cacheControl: { type: 'ephemeral' } },
      },
+     // ...
    });
    ```
 
- - Add an additional check to determine whether the model natively supports specific file types. Only download the file if the model does not support it natively. ([#9790](https://github.com/mastra-ai/mastra/pull/9790))
+ - Fixed OpenAI reasoning message merging so distinct reasoning items are no longer dropped when they share a message ID. Prevents downstream errors where a function call is missing its required "reasoning" item. See #9005. ([#10614](https://github.com/mastra-ai/mastra/pull/10614))
+
+ - Updated dependencies [[`103586c`](https://github.com/mastra-ai/mastra/commit/103586cb23ebcd2466c7f68a71674d37cc10e263), [`61a5705`](https://github.com/mastra-ai/mastra/commit/61a570551278b6743e64243b3ce7d73de915ca8a), [`db70a48`](https://github.com/mastra-ai/mastra/commit/db70a48aeeeeb8e5f92007e8ede52c364ce15287), [`f03ae60`](https://github.com/mastra-ai/mastra/commit/f03ae60500fe350c9d828621006cdafe1975fdd8)]:
+   - @mastra/observability@1.0.0-beta.2
+   - @mastra/schema-compat@1.0.0-beta.2
+
+ ## 1.0.0-beta.5
+
+ ### Patch Changes
+
+ - Add Azure OpenAI gateway ([#9990](https://github.com/mastra-ai/mastra/pull/9990))
 
- - Add restart method to workflow run that allows restarting an active workflow run ([#9750](https://github.com/mastra-ai/mastra/pull/9750))
-   Add status filter to `listWorkflowRuns`
-   Add automatic restart to restart active workflow runs when server starts
+   The Azure OpenAI gateway supports three configuration modes:
+   1. **Static deployments**: Provide deployment names from Azure Portal
+   2. **Dynamic discovery**: Query Azure Management API for available deployments
+   3. **Manual**: Specify deployment names when creating agents
+
+   ## Usage
+
+   ```typescript
+   import { Mastra } from '@mastra/core';
+   import { AzureOpenAIGateway } from '@mastra/core/llm';
 
- - Validate schemas by default in workflow. Previously, if you want schemas in the workflow to be validated, you'd have to add `validateInputs` option, now, this will be done by default but can be disabled. ([#10186](https://github.com/mastra-ai/mastra/pull/10186))
+   // Static mode (recommended)
+   export const mastra = new Mastra({
+     gateways: [
+       new AzureOpenAIGateway({
+         resourceName: process.env.AZURE_RESOURCE_NAME!,
+         apiKey: process.env.AZURE_API_KEY!,
+         deployments: ['gpt-4-prod', 'gpt-35-turbo-dev'],
+       }),
+     ],
+   });
 
-   For workflows whose schemas and step schemas you don't want validated, do this
+   // Dynamic discovery mode
+   export const mastra = new Mastra({
+     gateways: [
+       new AzureOpenAIGateway({
+         resourceName: process.env.AZURE_RESOURCE_NAME!,
+         apiKey: process.env.AZURE_API_KEY!,
+         management: {
+           tenantId: process.env.AZURE_TENANT_ID!,
+           clientId: process.env.AZURE_CLIENT_ID!,
+           clientSecret: process.env.AZURE_CLIENT_SECRET!,
+           subscriptionId: process.env.AZURE_SUBSCRIPTION_ID!,
+           resourceGroup: 'my-resource-group',
+         },
+       }),
+     ],
+   });
 
-   ```diff
+   // Use Azure OpenAI models
+   const agent = new Agent({
+     model: 'azure-openai/gpt-4-deployment',
+     instructions: 'You are a helpful assistant',
+   });
 
- ... 5468 more lines hidden. See full changelog in package directory.
+ ... 5916 more lines hidden. See full changelog in package directory.