@mastra/core 1.11.0-alpha.2 → 1.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110) hide show
  1. package/CHANGELOG.md +330 -0
  2. package/dist/agent/index.cjs +8 -8
  3. package/dist/agent/index.js +1 -1
  4. package/dist/{chunk-7P77FWM6.js → chunk-7FVJIJGY.js} +3 -3
  5. package/dist/{chunk-7P77FWM6.js.map → chunk-7FVJIJGY.js.map} +1 -1
  6. package/dist/{chunk-MIAFERSL.js → chunk-7UM6FE6A.js} +8 -2
  7. package/dist/chunk-7UM6FE6A.js.map +1 -0
  8. package/dist/{chunk-CDXZJ3PO.cjs → chunk-E7PW4ZAB.cjs} +8 -2
  9. package/dist/chunk-E7PW4ZAB.cjs.map +1 -0
  10. package/dist/{chunk-L7NBGABG.cjs → chunk-FIQRYC7M.cjs} +4 -4
  11. package/dist/{chunk-L7NBGABG.cjs.map → chunk-FIQRYC7M.cjs.map} +1 -1
  12. package/dist/{chunk-V5SM5A4F.cjs → chunk-FN6ZOGJ7.cjs} +6 -6
  13. package/dist/{chunk-V5SM5A4F.cjs.map → chunk-FN6ZOGJ7.cjs.map} +1 -1
  14. package/dist/{chunk-RH5O4YBE.cjs → chunk-NJVIJQ4R.cjs} +3 -3
  15. package/dist/{chunk-RH5O4YBE.cjs.map → chunk-NJVIJQ4R.cjs.map} +1 -1
  16. package/dist/{chunk-6VSVQ62A.cjs → chunk-QAIBZ7GH.cjs} +195 -190
  17. package/dist/{chunk-6VSVQ62A.cjs.map → chunk-QAIBZ7GH.cjs.map} +1 -1
  18. package/dist/{chunk-JNNKF6W5.cjs → chunk-QGT2HJGX.cjs} +24 -24
  19. package/dist/{chunk-JNNKF6W5.cjs.map → chunk-QGT2HJGX.cjs.map} +1 -1
  20. package/dist/{chunk-J4V2WVFG.js → chunk-RF2EPUM4.js} +3 -3
  21. package/dist/{chunk-J4V2WVFG.js.map → chunk-RF2EPUM4.js.map} +1 -1
  22. package/dist/{chunk-VVX2O7XN.js → chunk-S5BMBKUR.js} +19 -14
  23. package/dist/{chunk-VVX2O7XN.js.map → chunk-S5BMBKUR.js.map} +1 -1
  24. package/dist/{chunk-XWMBFPD5.js → chunk-VW4MYD6N.js} +3 -3
  25. package/dist/{chunk-XWMBFPD5.js.map → chunk-VW4MYD6N.js.map} +1 -1
  26. package/dist/{chunk-WLS37KH3.js → chunk-W3LSSG4T.js} +3 -3
  27. package/dist/{chunk-WLS37KH3.js.map → chunk-W3LSSG4T.js.map} +1 -1
  28. package/dist/{chunk-QYYWWS3U.cjs → chunk-YTND5227.cjs} +2 -2
  29. package/dist/chunk-YTND5227.cjs.map +1 -0
  30. package/dist/{chunk-5FNG2ISL.js → chunk-YY3T5AJK.js} +2 -2
  31. package/dist/chunk-YY3T5AJK.js.map +1 -0
  32. package/dist/datasets/index.cjs +16 -16
  33. package/dist/datasets/index.js +1 -1
  34. package/dist/docs/SKILL.md +1 -1
  35. package/dist/docs/assets/SOURCE_MAP.json +310 -310
  36. package/dist/docs/references/reference-observability-tracing-interfaces.md +3 -0
  37. package/dist/evals/index.cjs +19 -19
  38. package/dist/evals/index.js +2 -2
  39. package/dist/evals/scoreTraces/index.cjs +7 -7
  40. package/dist/evals/scoreTraces/index.js +3 -3
  41. package/dist/harness/index.cjs +16 -16
  42. package/dist/harness/index.js +6 -6
  43. package/dist/index.cjs +2 -2
  44. package/dist/index.js +1 -1
  45. package/dist/integration/index.cjs +2 -2
  46. package/dist/integration/index.js +1 -1
  47. package/dist/llm/index.cjs +12 -12
  48. package/dist/llm/index.js +3 -3
  49. package/dist/loop/index.cjs +14 -14
  50. package/dist/loop/index.js +1 -1
  51. package/dist/mastra/index.cjs +2 -2
  52. package/dist/mastra/index.js +1 -1
  53. package/dist/memory/index.cjs +14 -14
  54. package/dist/memory/index.js +1 -1
  55. package/dist/netlify-BFMLDQM5.js +3 -0
  56. package/dist/{netlify-QDVWIV7X.js.map → netlify-BFMLDQM5.js.map} +1 -1
  57. package/dist/netlify-RN36CL4F.cjs +12 -0
  58. package/dist/{netlify-OZA4BKIK.cjs.map → netlify-RN36CL4F.cjs.map} +1 -1
  59. package/dist/observability/index.cjs +17 -17
  60. package/dist/observability/index.js +1 -1
  61. package/dist/observability/types/tracing.d.ts +2 -0
  62. package/dist/observability/types/tracing.d.ts.map +1 -1
  63. package/dist/processor-provider/index.cjs +10 -10
  64. package/dist/processor-provider/index.js +1 -1
  65. package/dist/processors/index.cjs +42 -42
  66. package/dist/processors/index.js +1 -1
  67. package/dist/provider-registry-6FITK73S.js +3 -0
  68. package/dist/{provider-registry-L3ZZWLYW.js.map → provider-registry-6FITK73S.js.map} +1 -1
  69. package/dist/provider-registry-IHJW2WNI.cjs +40 -0
  70. package/dist/{provider-registry-TLYLLTKR.cjs.map → provider-registry-IHJW2WNI.cjs.map} +1 -1
  71. package/dist/relevance/index.cjs +3 -3
  72. package/dist/relevance/index.js +1 -1
  73. package/dist/storage/constants.cjs +56 -56
  74. package/dist/storage/constants.js +1 -1
  75. package/dist/storage/index.cjs +172 -172
  76. package/dist/storage/index.js +2 -2
  77. package/dist/stream/index.cjs +8 -8
  78. package/dist/stream/index.js +1 -1
  79. package/dist/test-utils/llm-mock.cjs +4 -4
  80. package/dist/test-utils/llm-mock.js +1 -1
  81. package/dist/tool-loop-agent/index.cjs +4 -4
  82. package/dist/tool-loop-agent/index.js +1 -1
  83. package/dist/tools/index.cjs +6 -6
  84. package/dist/tools/index.js +1 -1
  85. package/dist/tools/is-vercel-tool.cjs +2 -2
  86. package/dist/tools/is-vercel-tool.js +1 -1
  87. package/dist/tools/tool-builder/builder.d.ts.map +1 -1
  88. package/dist/tools/tool.d.ts +6 -1
  89. package/dist/tools/tool.d.ts.map +1 -1
  90. package/dist/tools/types.d.ts +15 -0
  91. package/dist/tools/types.d.ts.map +1 -1
  92. package/dist/utils.cjs +23 -23
  93. package/dist/utils.js +1 -1
  94. package/dist/vector/index.cjs +7 -7
  95. package/dist/vector/index.js +1 -1
  96. package/dist/workflows/evented/index.cjs +10 -10
  97. package/dist/workflows/evented/index.js +1 -1
  98. package/dist/workflows/index.cjs +24 -24
  99. package/dist/workflows/index.js +1 -1
  100. package/dist/workspace/index.cjs +67 -67
  101. package/dist/workspace/index.js +1 -1
  102. package/package.json +10 -10
  103. package/dist/chunk-5FNG2ISL.js.map +0 -1
  104. package/dist/chunk-CDXZJ3PO.cjs.map +0 -1
  105. package/dist/chunk-MIAFERSL.js.map +0 -1
  106. package/dist/chunk-QYYWWS3U.cjs.map +0 -1
  107. package/dist/netlify-OZA4BKIK.cjs +0 -12
  108. package/dist/netlify-QDVWIV7X.js +0 -3
  109. package/dist/provider-registry-L3ZZWLYW.js +0 -3
  110. package/dist/provider-registry-TLYLLTKR.cjs +0 -40
package/CHANGELOG.md CHANGED
@@ -1,5 +1,335 @@
1
1
  # @mastra/core
2
2
 
3
+ ## 1.11.0
4
+
5
+ ### Minor Changes
6
+
7
+ - feat: support dynamic functions returning model fallback arrays ([#11975](https://github.com/mastra-ai/mastra/pull/11975))
8
+
9
+ Agents can now use dynamic functions that return entire fallback arrays based on runtime context. This enables:
10
+ - Dynamic selection of complete fallback configurations
11
+ - Context-based model selection with automatic fallback
12
+ - Flexible model routing based on user tier, region, or other factors
13
+ - Nested dynamic functions within returned arrays (each model in array can also be dynamic)
14
+
15
+ ## Examples
16
+
17
+ ### Basic dynamic fallback array
18
+
19
+ ```typescript
20
+ const agent = new Agent({
21
+ model: ({ requestContext }) => {
22
+ const tier = requestContext.get('tier');
23
+ if (tier === 'premium') {
24
+ return [
25
+ { model: 'openai/gpt-4', maxRetries: 2 },
26
+ { model: 'anthropic/claude-3-opus', maxRetries: 1 },
27
+ ];
28
+ }
29
+ return [{ model: 'openai/gpt-3.5-turbo', maxRetries: 1 }];
30
+ },
31
+ });
32
+ ```
33
+
34
+ ### Region-based routing with nested dynamics
35
+
36
+ ```typescript
37
+ const agent = new Agent({
38
+ model: ({ requestContext }) => {
39
+ const region = requestContext.get('region');
40
+ return [
41
+ {
42
+ model: ({ requestContext }) => {
43
+ // Select model variant based on region
44
+ return region === 'eu' ? 'openai/gpt-4-eu' : 'openai/gpt-4';
45
+ },
46
+ maxRetries: 2,
47
+ },
48
+ { model: 'anthropic/claude-3-opus', maxRetries: 1 },
49
+ ];
50
+ },
51
+ maxRetries: 1, // Agent-level default for models without explicit maxRetries
52
+ });
53
+ ```
54
+
55
+ ### Async dynamic selection
56
+
57
+ ```typescript
58
+ const agent = new Agent({
59
+ model: async ({ requestContext }) => {
60
+ // Fetch user's tier from database
61
+ const userId = requestContext.get('userId');
62
+ const user = await db.users.findById(userId);
63
+
64
+ if (user.tier === 'enterprise') {
65
+ return [
66
+ { model: 'openai/gpt-4', maxRetries: 3 },
67
+ { model: 'anthropic/claude-3-opus', maxRetries: 2 },
68
+ ];
69
+ }
70
+ return [{ model: 'openai/gpt-3.5-turbo', maxRetries: 1 }];
71
+ },
72
+ });
73
+ ```
74
+
75
+ ## Technical Details
76
+ - Functions can return `MastraModelConfig` (single model) or `ModelWithRetries[]` (array)
77
+ - Models without explicit `maxRetries` inherit agent-level `maxRetries` default
78
+ - Each model in returned array can also be a dynamic function for nested selection
79
+ - Empty arrays are validated and throw errors early
80
+ - Arrays are normalized to `ModelFallbacks` with all required fields filled in
81
+ - Performance optimization: Already-normalized arrays skip re-normalization
82
+
83
+ ## Fixes and Improvements
84
+ - Dynamic model fallbacks now properly inherit agent-level `maxRetries` when not explicitly specified
85
+ - `getModelList()` now correctly handles dynamic functions that return arrays
86
+ - Added validation for empty arrays returned from dynamic functions
87
+ - Added type guard optimization to prevent double normalization of static arrays
88
+ - Preserved backward-compatible `getLLM()` and `getModel()` return behavior while adding dynamic fallback array support
89
+ - Comprehensive test coverage for edge cases (async functions, nested dynamics, error handling)
90
+
91
+ ## Documentation
92
+ - Added dynamic fallback array example in Models docs under **Model fallbacks**
93
+
94
+ ## Migration Guide
95
+
96
+ No breaking changes. All existing model configurations continue to work:
97
+ - Static single models: `model: 'openai/gpt-4'`
98
+ - Static arrays: `model: [{ model: 'openai/gpt-4', maxRetries: 2 }]`
99
+ - Dynamic single: `model: ({ requestContext }) => 'openai/gpt-4'`
100
+ - Dynamic arrays (NEW): `model: ({ requestContext }) => [{ model: 'openai/gpt-4', maxRetries: 2 }]`
101
+
102
+ Closes #11951
103
+
104
+ - Added `onValidationError` hook to `ServerConfig` and `createRoute()`. When a request fails Zod schema validation (query parameters, request body, or path parameters), this hook lets you customize the error response — including the HTTP status code and response body — instead of the default 400 response. Set it on the server config to apply globally, or on individual routes to override per-route. All server adapters (Hono, Express, Fastify, Koa) support this hook. ([#13477](https://github.com/mastra-ai/mastra/pull/13477))
105
+
106
+ ```ts
107
+ const mastra = new Mastra({
108
+ server: {
109
+ onValidationError: (error, context) => ({
110
+ status: 422,
111
+ body: {
112
+ ok: false,
113
+ errors: error.issues.map(i => ({
114
+ path: i.path.join('.'),
115
+ message: i.message,
116
+ })),
117
+ source: context,
118
+ },
119
+ }),
120
+ },
121
+ });
122
+ ```
123
+
124
+ - Added `requestContext` field to tracing spans. Each span now automatically captures a snapshot of the active `RequestContext`, making request-scoped values like user IDs, tenant IDs, and feature flags available when viewing traces. ([#14020](https://github.com/mastra-ai/mastra/pull/14020))
125
+
126
+ - Added `allowedWorkspaceTools` to `HarnessSubagent`. Subagents now automatically inherit the parent agent's workspace. Use `allowedWorkspaceTools` to restrict which workspace tools a subagent can see: ([#13940](https://github.com/mastra-ai/mastra/pull/13940))
127
+
128
+ ```ts
129
+ const subagent: HarnessSubagent = {
130
+ id: 'explore',
131
+ name: 'Explore',
132
+ allowedWorkspaceTools: ['view', 'search_content', 'find_files'],
133
+ };
134
+ ```
135
+
136
+ - Enabled tracing for tool executions through mcp server ([#12804](https://github.com/mastra-ai/mastra/pull/12804))
137
+
138
+ Traces now appear in the Observability UI for MCP server tool calls
139
+
140
+ - Added `result` to `processOutputResult` args, providing resolved generation data (usage, text, steps, finishReason) directly. This replaces raw stream chunks with an easy-to-use `OutputResult` object containing the same data available in the `onFinish` callback. ([#13810](https://github.com/mastra-ai/mastra/pull/13810))
141
+
142
+ ```typescript
143
+ const usageProcessor: Processor = {
144
+ id: 'usage-processor',
145
+ processOutputResult({ result, messages }) {
146
+ console.log(`Text: ${result.text}`);
147
+ console.log(`Tokens: ${result.usage.inputTokens} in, ${result.usage.outputTokens} out`);
148
+ console.log(`Finish reason: ${result.finishReason}`);
149
+ console.log(`Steps: ${result.steps.length}`);
150
+ return messages;
151
+ },
152
+ };
153
+ ```
154
+
155
+ - Added `requestContext` support for dataset items and experiments. ([#13938](https://github.com/mastra-ai/mastra/pull/13938))
156
+
157
+ **Dataset items** now accept an optional `requestContext` field when adding or updating items. This lets you store per-item request context alongside inputs and ground truths.
158
+
159
+ **Datasets** now support a `requestContextSchema` field to describe the expected shape of request context on items.
160
+
161
+ **Experiments** now accept a `requestContext` option that gets passed through to `agent.generate()` during execution. Per-item request context merges with (and takes precedence over) the experiment-level context.
162
+
163
+ ```ts
164
+ // Add item with request context
165
+ await dataset.addItem({
166
+ input: messages,
167
+ groundTruth: expectedOutput,
168
+ requestContext: { userId: '123', locale: 'en' },
169
+ });
170
+
171
+ // Run experiment with global request context
172
+ await runExperiment(mastra, {
173
+ datasetId: 'my-dataset',
174
+ targetType: 'agent',
175
+ targetId: 'my-agent',
176
+ requestContext: { environment: 'staging' },
177
+ });
178
+ ```
179
+
180
+ - Add Zod v4 and Standard Schema support ([#12238](https://github.com/mastra-ai/mastra/pull/12238))
181
+
182
+ ## Zod v4 Breaking Changes
183
+ - Fix all `z.record()` calls to use 2-argument form (key + value schema) as required by Zod v4
184
+ - Update `ZodError.errors` to `ZodError.issues` (Zod v4 API change)
185
+ - Update `@ai-sdk/provider` versions for Zod v4 compatibility
186
+
187
+ ## Standard Schema Integration
188
+ - Add `packages/core/src/schema/` module that re-exports from `@mastra/schema-compat`
189
+ - Migrate codebase to use `PublicSchema` type for schema parameters
190
+ - Use `toStandardSchema()` for normalizing schemas across Zod v3, Zod v4, AI SDK Schema, and JSON Schema
191
+ - Use `standardSchemaToJSONSchema()` for JSON Schema conversion
192
+
193
+ ## Schema Compatibility (@mastra/schema-compat)
194
+ - Add new adapter exports: `@mastra/schema-compat/adapters/ai-sdk`, `@mastra/schema-compat/adapters/zod-v3`, `@mastra/schema-compat/adapters/json-schema`
195
+ - Enhance test coverage with separate v3 and v4 test suites
196
+ - Improve zod-to-json conversion with `unrepresentable: 'any'` support
197
+
198
+ ## TypeScript Fixes
199
+ - Resolve deep instantiation errors in client-js and model.ts
200
+ - Add proper type assertions where Zod v4 inference differs
201
+
202
+ **BREAKING CHANGE**: Minimum Zod version is now `^3.25.0` for v3 compatibility or `^4.0.0` for v4
203
+
204
+ ### Patch Changes
205
+
206
+ - dependencies updates: ([#14085](https://github.com/mastra-ai/mastra/pull/14085))
207
+ - Updated dependency [`ajv@^8.18.0` ↗︎](https://www.npmjs.com/package/ajv/v/8.18.0) (from `^8.17.1`, in `dependencies`)
208
+
209
+ - Update provider registry and model documentation with latest models and providers ([`332c014`](https://github.com/mastra-ai/mastra/commit/332c014e076b81edf7fe45b58205882726415e90))
210
+
211
+ - fix(workflows): add generic bail signature with overloads. The bail() function now uses method overloads - bail(result: TStepOutput) for backward compatibility and bail<T>(result: ...) for workflow type inference. This allows flexible early exits while maintaining type safety for workflow chaining. Runtime validation will be added in a follow-up. ([#12211](https://github.com/mastra-ai/mastra/pull/12211))
212
+
213
+ - Fixed structured output parsing when JSON string fields include fenced JSON examples. ([#13948](https://github.com/mastra-ai/mastra/pull/13948))
214
+
215
+ - Fixed `writer` being undefined in `processOutputStream` for all output processors. The root cause was that `processPart` in `ProcessorRunner` did not pass the `writer` to `executeWorkflowAsProcessor` in the outputStream phase. Since all user processors are wrapped into workflows via `combineProcessorsIntoWorkflow`, this meant no processor ever received a `writer`. Custom output processors (like guardrail processors) can now reliably use `writer.custom()` to emit stream events. ([#14111](https://github.com/mastra-ai/mastra/pull/14111))
216
+
217
+ - Added JSON repair for malformed tool call arguments from LLM providers. When an LLM (e.g., Kimi/K2) generates broken JSON for tool call arguments, Mastra now attempts to fix common errors (missing quotes on property names, single quotes, trailing commas) before falling back to undefined. This reduces silent tool execution failures caused by minor JSON formatting issues. See https://github.com/mastra-ai/mastra/issues/11078 ([#14033](https://github.com/mastra-ai/mastra/pull/14033))
218
+
219
+ - Fixed Windows shell command execution to avoid visible cmd.exe popups and broken output piping. ([#13886](https://github.com/mastra-ai/mastra/pull/13886))
220
+
221
+ Fixed OpenAI reasoning models (e.g. gpt-5-mini) failing with "function_call was provided without its required reasoning item" when the agent loops back after a tool call. The issue was that `callProviderMetadata.openai` carrying `fc_*` item IDs was not being stripped alongside reasoning parts, causing the AI SDK to send `item_reference` instead of inline `function_call` content. ([#14144](https://github.com/mastra-ai/mastra/pull/14144))
222
+
223
+ - Output processors can now inspect, modify, or block custom `data-*` chunks emitted by tools via `writer.custom()` during streaming. Processors must opt in by setting `processDataParts = true` to receive these chunks in `processOutputStream`. ([#13823](https://github.com/mastra-ai/mastra/pull/13823))
224
+
225
+ ```ts
226
+ class MyDataProcessor extends Processor {
227
+ processDataParts = true;
228
+
229
+ processOutputStream(part, { abort }) {
230
+ if (part.type === 'data-sensitive') {
231
+ abort('Blocked sensitive data');
232
+ }
233
+ return part;
234
+ }
235
+ }
236
+ ```
237
+
238
+ - Fixed agent tool calls not being surfaced in evented workflow streams. Added StreamChunkWriter abstraction and stream format configuration ('legacy' | 'vnext') to forward agent stream chunks through the workflow output stream. ([#12692](https://github.com/mastra-ai/mastra/pull/12692))
239
+
240
+ - Fixed OpenAI strict mode schema rejection when using agent networks with structured output. The compat layer was skipped when modelId was undefined, causing optional fields to be missing from the required array. (Fixes #12284) ([#13695](https://github.com/mastra-ai/mastra/pull/13695))
241
+
242
+ - Fixed `activeTools` to also enforce at execution time, not just at the model prompt. Tool calls to tools not in the active set are now rejected with a `ToolNotFoundError`. ([#13949](https://github.com/mastra-ai/mastra/pull/13949))
243
+
244
+ - Fix build failures on Windows when running `build:patch-commonjs` during `pnpm run setup` ([#14029](https://github.com/mastra-ai/mastra/pull/14029))
245
+
246
+ - Experiments now fail immediately with a clear error when triggered on a dataset with zero items, instead of getting stuck in "pending" status forever. The experiment trigger API returns HTTP 400 for empty datasets. Unexpected errors during async experiment setup are now logged and mark the experiment as failed. ([#14031](https://github.com/mastra-ai/mastra/pull/14031))
247
+
248
+ - fix: respect `lastMessages: false` in `recall()` to disable conversation history ([#12951](https://github.com/mastra-ai/mastra/pull/12951))
249
+
250
+ Setting `lastMessages: false` in Memory options now correctly prevents `recall()` from returning previous messages. Previously, the agent would retain the full conversation history despite this setting being disabled.
251
+
252
+ Callers can still pass `perPage: false` explicitly to `recall()` to retrieve all messages (e.g., for displaying thread history in a UI).
253
+
254
+ - `@mastra/core`: patch ([#14103](https://github.com/mastra-ai/mastra/pull/14103))
255
+
256
+ Fixed reasoning content being lost in multi-turn conversations with thinking models (kimi-k2.5, DeepSeek-R1) when using OpenAI-compatible providers (e.g., OpenRouter).
257
+
258
+ Previously, reasoning content could be discarded during streaming, causing 400 errors when the model tried to continue the conversation. Multi-turn conversations now preserve reasoning content correctly across all turns.
259
+
260
+ - fix(workflows): propagate logger to executionEngine ([#12517](https://github.com/mastra-ai/mastra/pull/12517))
261
+
262
+ When a custom logger is set on a Workflow via `__registerPrimitives` or `__setLogger`, it is now correctly propagated to the internal `executionEngine`. This ensures workflow step execution errors are logged through the custom logger instead of the default ConsoleLogger, enabling proper observability integration.
263
+
264
+ - Added permission denied handling for dataset pages. Datasets now show a "Permission Denied" screen when the user lacks access, matching the behavior of agents, workflows, and other resources. ([#13876](https://github.com/mastra-ai/mastra/pull/13876))
265
+
266
+ - Fixed stream freezing when using Anthropic's Programmatic Tool Calling (PTC). Streams that contain only tool-input streaming chunks (without explicit tool-call chunks) now correctly synthesize tool-call events and complete without hanging. See [#12390](https://github.com/mastra-ai/mastra/issues/12390). ([#12400](https://github.com/mastra-ai/mastra/pull/12400))
267
+
268
+ - Fixed subagents receiving parent's tool call/result parts in their context messages. On subsequent queries in a conversation, these references to tools the subagent doesn't have caused models (especially via custom gateways) to return blank or incorrect results. Parent delegation tool artifacts are now stripped from context before forwarding to subagents. ([#13927](https://github.com/mastra-ai/mastra/pull/13927))
269
+
270
+ - Memory now automatically creates btree indexes on `thread_id` and `resource_id` metadata fields when using PgVector. This prevents sequential scans on the `memory_messages` vector table, resolving performance issues under high load. ([#14034](https://github.com/mastra-ai/mastra/pull/14034))
271
+
272
+ Fixes #12109
273
+
274
+ - Clarified the `idGenerator` documentation to reflect the current context-aware function signature and documented the available `IdGeneratorContext` fields used for type-specific ID generation. ([#14081](https://github.com/mastra-ai/mastra/pull/14081))
275
+
276
+ - Reasoning content from OpenAI models is now stripped from conversation history before replaying it to the LLM, preventing API errors on follow-up messages while preserving reasoning data in the database. Fixes #12980. ([#13418](https://github.com/mastra-ai/mastra/pull/13418))
277
+
278
+ - Added `transient` option for data chunks to skip database persistence. Chunks marked as transient are streamed to the client for live display but not saved to storage, reducing bloat from large streaming outputs. ([#13869](https://github.com/mastra-ai/mastra/pull/13869))
279
+
280
+ ```ts
281
+ await context.writer?.custom({
282
+ type: 'data-my-stream',
283
+ data: { output: line },
284
+ transient: true,
285
+ });
286
+ ```
287
+
288
+ Workspace tools now use this to mark stdout/stderr streaming chunks as transient.
289
+
290
+ - Fixed message ID mismatch between generate/stream response and memory-stored messages. When an agent used memory, the message IDs returned in the response (e.g. `response.uiMessages[].id`) could differ from the IDs persisted to the database. This was caused by a format conversion that stripped message IDs during internal re-processing. Messages now retain their original IDs throughout the entire save pipeline. ([#13796](https://github.com/mastra-ai/mastra/pull/13796))
291
+
292
+ - Fixed assistant messages to persist `content.metadata.modelId` during streaming. ([#12969](https://github.com/mastra-ai/mastra/pull/12969))
293
+ This ensures stored and processed assistant messages keep the model identifier.
294
+ Developers can now reliably read `content.metadata.modelId` from downstream storage adapters and processors.
295
+
296
+ - Fixed `savePerStep: true` not actually persisting messages to storage during step execution. Previously, `onStepFinish` only accumulated messages in the in-memory MessageList but never flushed them to the storage backend. The only persistence path was `executeOnFinish`, which is skipped when the stream is aborted. Now messages are flushed to storage after each completed step, so they survive page refreshes and stream aborts. Fixes https://github.com/mastra-ai/mastra/issues/13984 ([#14030](https://github.com/mastra-ai/mastra/pull/14030))
297
+
298
+ - Fixed agentic loop continuing indefinitely when model hits max output tokens (finishReason: 'length'). Previously, only 'stop' and 'error' were treated as termination conditions, causing runaway token generation up to maxSteps when using structuredOutput with generate(). The loop now correctly stops on 'length' finish reason. Fixes #13012. ([#13861](https://github.com/mastra-ai/mastra/pull/13861))
299
+
300
+ - **Fixed tool-call arguments being silently lost when LLMs append internal tokens to JSON** ([#13400](https://github.com/mastra-ai/mastra/pull/13400))
301
+
302
+ LLMs (particularly via OpenRouter and OpenAI) sometimes append internal tokens like `<|call|>`, `<|endoftext|>`, or `<|end|>` to otherwise valid JSON in streamed tool-call arguments. Previously, these inputs would fail `JSON.parse` and the tool call would silently lose its arguments (set to `undefined`).
303
+
304
+ Now, `sanitizeToolCallInput` strips these token patterns before parsing, recovering valid data that was previously discarded. Valid JSON containing `<|...|>` inside string values is left untouched. Truly malformed JSON still gracefully returns `undefined`.
305
+
306
+ Fixes https://github.com/mastra-ai/mastra/issues/13185 and https://github.com/mastra-ai/mastra/issues/13261.
307
+
308
+ - Fixed agent loop stopping prematurely when LLM returns tool calls with finishReason 'stop'. Some models (e.g., OpenAI gpt-5.3-codex) return 'stop' even when tool calls are present, causing the agent to halt instead of processing tool results and continuing. The agent now correctly continues the loop whenever tool calls exist, regardless of finishReason. ([#14043](https://github.com/mastra-ai/mastra/pull/14043))
309
+
310
+ - **Fixed** ([#14133](https://github.com/mastra-ai/mastra/pull/14133))
311
+ - Prevented provider-executed tools from triggering extra loop iterations and duplicate requests.
312
+ - Preserved tool-call metadata during streaming so multi-turn conversations continue to work correctly with provider-executed tools.
313
+
314
+ - Fixed observational memory activation using outdated buffered observations in some long-running threads. Activation now uses the latest thread state so the correct observations are promoted. ([#13955](https://github.com/mastra-ai/mastra/pull/13955))
315
+
316
+ - Fixed model fallback retry behavior. Non-retryable errors (401, 403) are no longer retried on the same model before falling back. Retryable errors (429, 500) are now only retried by a single layer (p-retry) instead of being duplicated across two layers, preventing (maxRetries + 1)² total calls. The per-model maxRetries setting now correctly controls how many times p-retry retries on that specific model before the fallback loop moves to the next model. ([#14039](https://github.com/mastra-ai/mastra/pull/14039))
317
+
318
+ - Added processor-driven response message ID rotation so streamed assistant IDs use the rotated ID. ([#13887](https://github.com/mastra-ai/mastra/pull/13887))
319
+
320
+ Processors that run outside the agent loop no longer need synthetic response message IDs.
321
+
322
+ - Fixed a regression where dynamic `model` functions returning a single v1 model were treated as model arrays. ([#14018](https://github.com/mastra-ai/mastra/pull/14018))
323
+
324
+ - Fixed `requestContext` not being forwarded to tools dynamically added by input processors. ([#13827](https://github.com/mastra-ai/mastra/pull/13827))
325
+
326
+ - Added 'sandbox_access_request' to the HarnessEvent union type, enabling type-safe handling of sandbox access request events without requiring type casts. ([#13648](https://github.com/mastra-ai/mastra/pull/13648))
327
+
328
+ - Fix wrong threadId and resourceId being sent to subagent ([#13868](https://github.com/mastra-ai/mastra/pull/13868))
329
+
330
+ - Updated dependencies [[`fb58ce1`](https://github.com/mastra-ai/mastra/commit/fb58ce1de85d57f142005c4b3b7559f909167a3f), [`aae2295`](https://github.com/mastra-ai/mastra/commit/aae2295838a2d329ad6640829e87934790ffe5b8), [`17c4145`](https://github.com/mastra-ai/mastra/commit/17c4145166099354545582335b5252bdfdfd908b)]:
331
+ - @mastra/schema-compat@1.2.0
332
+
3
333
  ## 1.11.0-alpha.2
4
334
 
5
335
  ### Patch Changes
@@ -1,37 +1,37 @@
1
1
  'use strict';
2
2
 
3
- var chunk6VSVQ62A_cjs = require('../chunk-6VSVQ62A.cjs');
3
+ var chunkQAIBZ7GH_cjs = require('../chunk-QAIBZ7GH.cjs');
4
4
  var chunk6NZQ52RZ_cjs = require('../chunk-6NZQ52RZ.cjs');
5
5
 
6
6
 
7
7
 
8
8
  Object.defineProperty(exports, "Agent", {
9
9
  enumerable: true,
10
- get: function () { return chunk6VSVQ62A_cjs.Agent; }
10
+ get: function () { return chunkQAIBZ7GH_cjs.Agent; }
11
11
  });
12
12
  Object.defineProperty(exports, "TripWire", {
13
13
  enumerable: true,
14
- get: function () { return chunk6VSVQ62A_cjs.TripWire; }
14
+ get: function () { return chunkQAIBZ7GH_cjs.TripWire; }
15
15
  });
16
16
  Object.defineProperty(exports, "isSupportedLanguageModel", {
17
17
  enumerable: true,
18
- get: function () { return chunk6VSVQ62A_cjs.isSupportedLanguageModel; }
18
+ get: function () { return chunkQAIBZ7GH_cjs.isSupportedLanguageModel; }
19
19
  });
20
20
  Object.defineProperty(exports, "resolveThreadIdFromArgs", {
21
21
  enumerable: true,
22
- get: function () { return chunk6VSVQ62A_cjs.resolveThreadIdFromArgs; }
22
+ get: function () { return chunkQAIBZ7GH_cjs.resolveThreadIdFromArgs; }
23
23
  });
24
24
  Object.defineProperty(exports, "supportedLanguageModelSpecifications", {
25
25
  enumerable: true,
26
- get: function () { return chunk6VSVQ62A_cjs.supportedLanguageModelSpecifications; }
26
+ get: function () { return chunkQAIBZ7GH_cjs.supportedLanguageModelSpecifications; }
27
27
  });
28
28
  Object.defineProperty(exports, "tryGenerateWithJsonFallback", {
29
29
  enumerable: true,
30
- get: function () { return chunk6VSVQ62A_cjs.tryGenerateWithJsonFallback; }
30
+ get: function () { return chunkQAIBZ7GH_cjs.tryGenerateWithJsonFallback; }
31
31
  });
32
32
  Object.defineProperty(exports, "tryStreamWithJsonFallback", {
33
33
  enumerable: true,
34
- get: function () { return chunk6VSVQ62A_cjs.tryStreamWithJsonFallback; }
34
+ get: function () { return chunkQAIBZ7GH_cjs.tryStreamWithJsonFallback; }
35
35
  });
36
36
  Object.defineProperty(exports, "MessageList", {
37
37
  enumerable: true,
@@ -1,4 +1,4 @@
1
- export { Agent, TripWire, isSupportedLanguageModel, resolveThreadIdFromArgs, supportedLanguageModelSpecifications, tryGenerateWithJsonFallback, tryStreamWithJsonFallback } from '../chunk-VVX2O7XN.js';
1
+ export { Agent, TripWire, isSupportedLanguageModel, resolveThreadIdFromArgs, supportedLanguageModelSpecifications, tryGenerateWithJsonFallback, tryStreamWithJsonFallback } from '../chunk-S5BMBKUR.js';
2
2
  export { MessageList, TypeDetector, aiV5ModelMessageToV2PromptMessage, convertMessages } from '../chunk-WD467XGW.js';
3
3
  //# sourceMappingURL=index.js.map
4
4
  //# sourceMappingURL=index.js.map
@@ -1,4 +1,4 @@
1
- import { createTool } from './chunk-MIAFERSL.js';
1
+ import { createTool } from './chunk-7UM6FE6A.js';
2
2
  import { MastraBase } from './chunk-WCAFTXGK.js';
3
3
  import { RegisteredLogger } from './chunk-X2WMFSPB.js';
4
4
  import * as nodePath from 'path';
@@ -7987,5 +7987,5 @@ function createWorkspaceTools(workspace) {
7987
7987
  }
7988
7988
 
7989
7989
  export { BM25Index, CompositeFilesystem, CompositeVersionedSkillSource, DirectoryNotEmptyError, DirectoryNotFoundError, FileExistsError, FileNotFoundError, FileReadRequiredError, FilesystemError, FilesystemNotAvailableError, FilesystemNotMountableError, FilesystemNotReadyError, IsDirectoryError, IsolationUnavailableError, LocalFilesystem, LocalSandbox, LocalSkillSource, MastraFilesystem, MastraSandbox, MountError, MountManager, MountNotSupportedError, NotDirectoryError, PermissionError, ProcessHandle, SandboxError, SandboxExecutionError, SandboxFeatureNotSupportedError, SandboxNotAvailableError, SandboxNotReadyError, SandboxProcessManager, SandboxTimeoutError, SearchNotAvailableError, VersionedSkillSource, WORKSPACE_TOOLS, WORKSPACE_TOOLS_PREFIX, Workspace, WorkspaceError, WorkspaceNotAvailableError, WorkspaceNotReadyError, WorkspaceReadOnlyError, callLifecycle, collectSkillForPublish, createGlobMatcher, createSkillTools, createWorkspaceTools, deleteFileTool, detectIsolation, editFileTool, executeCommandTool, extractGlobBase, fileStatTool, getRecommendedIsolation, indexContentTool, isGlobPattern, isIsolationAvailable, listFilesTool, matchGlob, mkdirTool, publishSkillFromSource, readFileTool, requireFilesystem, requireSandbox, requireWorkspace, resolveToolConfig, searchTool, writeFileTool };
7990
- //# sourceMappingURL=chunk-7P77FWM6.js.map
7991
- //# sourceMappingURL=chunk-7P77FWM6.js.map
7990
+ //# sourceMappingURL=chunk-7FVJIJGY.js.map
7991
+ //# sourceMappingURL=chunk-7FVJIJGY.js.map