@mastra/mcp-docs-server 0.13.44 → 0.13.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fchangeset-cli.md +2 -0
- package/.docs/organized/changelogs/%40internal%2Fexternal-types.md +2 -0
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
- package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +251 -51
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +422 -222
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Freact.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +72 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +104 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +49 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
- package/.docs/organized/changelogs/create-mastra.md +201 -1
- package/.docs/organized/changelogs/mastra.md +201 -1
- package/.docs/raw/agents/adding-voice.mdx +49 -0
- package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
- package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
- package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
- package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
- package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +23 -1
- package/.docs/raw/reference/client-js/memory.mdx +43 -0
- package/.docs/raw/reference/core/mastra-class.mdx +8 -0
- package/.docs/raw/reference/core/mastra-model-gateway.mdx +223 -0
- package/.docs/raw/reference/scorers/answer-relevancy.mdx +28 -98
- package/.docs/raw/reference/scorers/answer-similarity.mdx +12 -258
- package/.docs/raw/reference/scorers/bias.mdx +29 -87
- package/.docs/raw/reference/scorers/completeness.mdx +32 -91
- package/.docs/raw/reference/scorers/content-similarity.mdx +29 -99
- package/.docs/raw/reference/scorers/context-precision.mdx +28 -130
- package/.docs/raw/reference/scorers/faithfulness.mdx +28 -101
- package/.docs/raw/reference/scorers/hallucination.mdx +28 -103
- package/.docs/raw/reference/scorers/keyword-coverage.mdx +28 -107
- package/.docs/raw/reference/scorers/textual-difference.mdx +27 -100
- package/.docs/raw/reference/scorers/tone-consistency.mdx +25 -98
- package/.docs/raw/reference/scorers/toxicity.mdx +29 -92
- package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
- package/.docs/raw/reference/storage/lance.mdx +33 -0
- package/.docs/raw/reference/storage/libsql.mdx +37 -0
- package/.docs/raw/reference/storage/mongodb.mdx +39 -0
- package/.docs/raw/reference/storage/mssql.mdx +37 -0
- package/.docs/raw/reference/storage/postgresql.mdx +37 -0
- package/.docs/raw/reference/streaming/agents/stream.mdx +7 -0
- package/.docs/raw/reference/voice/composite-voice.mdx +71 -28
- package/.docs/raw/reference/voice/voice.listen.mdx +86 -52
- package/.docs/raw/reference/voice/voice.speak.mdx +75 -40
- package/.docs/raw/voice/overview.mdx +67 -0
- package/.docs/raw/workflows/control-flow.mdx +180 -0
- package/CHANGELOG.md +20 -0
- package/dist/{chunk-TUAHUTTB.js → chunk-VE65X75W.js} +24 -4
- package/dist/prepare-docs/package-changes.d.ts.map +1 -1
- package/dist/prepare-docs/prepare.js +1 -1
- package/dist/stdio.js +1 -1
- package/package.json +5 -5
@@ -1,302 +1,502 @@
 # @mastra/core

-## 0.24.
+## 0.24.6

 ### Patch Changes

-- Fix
-- Fix addStartStepPartsForAIV5 to prevent step-start parts from being inserted between consecutive tool parts (parallel tool calls)
-- This ensures parallel tool calls maintain correct order and preserve thought_signature metadata on the first tool call as required by Gemini API
+- Fix base64 encoded images with threads - issue #10480 ([#10566](https://github.com/mastra-ai/mastra/pull/10566))

-
+Fixed "Invalid URL" error when using base64 encoded images (without `data:` prefix) in agent calls with threads and resources. Raw base64 strings are now automatically converted to proper data URIs before being processed.

-
-
--
-
-
-
-
-
-
-
-
-
-
-
--
--
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-- Allow provider to pass through options to the auth config ([#10290](https://github.com/mastra-ai/mastra/pull/10290))
-
-- Fix deprecation warning when agent network executes workflows by using `.fullStream` instead of iterating `WorkflowRunOutput` directly ([#10306](https://github.com/mastra-ai/mastra/pull/10306))
-
-- Fix generate toolResults and mismatch in provider tool names ([#10297](https://github.com/mastra-ai/mastra/pull/10297))
-
-## 0.24.2
-
-### Patch Changes
-
-- Only handle download image asset transformation if needed ([#10245](https://github.com/mastra-ai/mastra/pull/10245))
-
-- Fix tool outputSchema validation to allow unsupported Zod types like ZodTuple. The outputSchema is only used for internal validation and never sent to the LLM, so model compatibility checks are not needed. ([#10123](https://github.com/mastra-ai/mastra/pull/10123))
-
-- Fix vector definition to fix pinecone ([#10179](https://github.com/mastra-ai/mastra/pull/10179))
+**Changes:**
+- Updated `attachments-to-parts.ts` to detect and convert raw base64 strings to data URIs
+- Fixed `MessageList` image processing to handle raw base64 in two locations:
+- Image part conversion in `aiV4CoreMessageToV1PromptMessage`
+- File part to experimental_attachments conversion in `mastraDBMessageToAIV4UIMessage`
+- Added comprehensive tests for base64 images, data URIs, and HTTP URLs with threads
+
+**Breaking Change:** None - this is a bug fix that maintains backward compatibility while adding support for raw base64 strings.
+
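As a rough illustration of the base64 fix in the entry above: a minimal sketch of the call shape it addresses. The agent name, model id, and memory option shape are assumptions for illustration, not taken from the changelog.

```typescript
import { Agent } from '@mastra/core/agent';

// Raw base64 with no `data:image/png;base64,` prefix; previously this could
// trigger "Invalid URL" when used together with threads and resources.
const rawBase64 = 'iVBORw0KGgoAAAANSUhEUg...';

const agent = new Agent({
  name: 'image-reader', // illustrative
  instructions: 'Describe the attached image.',
  model: 'openai/gpt-4o', // illustrative model id
});

const result = await agent.generate(
  [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'What is in this image?' },
        { type: 'image', image: rawBase64 }, // now converted to a data URI internally
      ],
    },
  ],
  { memory: { thread: 'thread-123', resource: 'user-123' } }, // assumed memory options shape
);
```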
+- SimpleAuth and improved CloudAuth ([#10569](https://github.com/mastra-ai/mastra/pull/10569))
+
+- Fixed OpenAI schema compatibility when using `agent.generate()` or `agent.stream()` with `structuredOutput`. ([#10454](https://github.com/mastra-ai/mastra/pull/10454))
+
+## Changes
+- **Automatic transformation**: Zod schemas are now automatically transformed for OpenAI strict mode compatibility when using OpenAI models (including reasoning models like o1, o3, o4)
+- **Optional field handling**: `.optional()` fields are converted to `.nullable()` with a transform that converts `null` → `undefined`, preserving optional semantics while satisfying OpenAI's strict mode requirements
+- **Preserves nullable fields**: Intentionally `.nullable()` fields remain unchanged
+- **Deep transformation**: Handles `.optional()` fields at any nesting level (objects, arrays, unions, etc.)
+- **JSON Schema objects**: Not transformed, only Zod schemas
+
+## Example
+
+```typescript
+const agent = new Agent({
+  name: 'data-extractor',
+  model: { provider: 'openai', modelId: 'gpt-4o' },
+  instructions: 'Extract user information',
+});
+
+const schema = z.object({
+  name: z.string(),
+  age: z.number().optional(),
+  deletedAt: z.date().nullable(),
+});
+
+// Schema is automatically transformed for OpenAI compatibility
+const result = await agent.generate('Extract: John, deleted yesterday', {
+  structuredOutput: { schema },
+});
+
+// Result: { name: 'John', age: undefined, deletedAt: null }
+```

--
+- deleteVectors, deleteFilter when upserting, updateVector filter (#10244) ([#10526](https://github.com/mastra-ai/mastra/pull/10526))
+
+- Fix generateTitle model type to accept AI SDK LanguageModelV2 ([#10567](https://github.com/mastra-ai/mastra/pull/10567))
+
+Updated the `generateTitle.model` config option to accept `MastraModelConfig` instead of `MastraLanguageModel`. This allows users to pass raw AI SDK `LanguageModelV2` models (e.g., `anthropic.languageModel('claude-3-5-haiku-20241022')`) directly without type errors.
+
+Previously, passing a standard `LanguageModelV2` would fail because `MastraLanguageModelV2` has different `doGenerate`/`doStream` return types. Now `MastraModelConfig` is used consistently across:
+- `memory/types.ts` - `generateTitle.model` config
+- `agent.ts` - `genTitle`, `generateTitleFromUserMessage`, `resolveTitleGenerationConfig`
+- `agent-legacy.ts` - `AgentLegacyCapabilities` interface
+
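To illustrate the `generateTitle.model` change above, a hypothetical memory configuration passing a raw AI SDK `LanguageModelV2` directly; the `Memory` options shape shown here is assumed and may differ by version.

```typescript
import { Memory } from '@mastra/memory';
import { anthropic } from '@ai-sdk/anthropic';

// A raw AI SDK LanguageModelV2 can now be passed without type errors,
// since generateTitle.model accepts MastraModelConfig.
const memory = new Memory({
  options: {
    threads: {
      generateTitle: {
        model: anthropic.languageModel('claude-3-5-haiku-20241022'),
      },
    },
  },
});
```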
+- Fix message metadata not persisting when using simple message format. Previously, custom metadata passed in messages (e.g., `{role: 'user', content: 'text', metadata: {userId: '123'}}`) was not being saved to the database. This occurred because the CoreMessage conversion path didn't preserve metadata fields. ([#10571](https://github.com/mastra-ai/mastra/pull/10571))
+
+Now metadata is properly preserved for all message input formats:
+- Simple CoreMessage format: `{role, content, metadata}`
+- Full UIMessage format: `{role, content, parts, metadata}`
+- AI SDK v5 ModelMessage format with metadata
+
+Fixes #8556
+
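A small sketch of the simple message format whose metadata now persists, given an agent like the one sketched earlier; the thread and resource identifiers and the memory option shape are placeholders.

```typescript
// Custom metadata on a simple CoreMessage-style message is now saved with the thread.
await agent.generate(
  [{ role: 'user', content: 'Remember my order number.', metadata: { userId: '123' } }],
  { memory: { thread: 'support-thread-1', resource: 'user-123' } }, // assumed options shape
);
```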
+- feat: Composite auth implementation ([#10486](https://github.com/mastra-ai/mastra/pull/10486))
+
+- Fix requireApproval property being ignored for tools passed via toolsets, clientTools, and memoryTools parameters. The requireApproval flag now correctly propagates through all tool conversion paths, ensuring tools requiring approval will properly request user approval before execution. ([#10562](https://github.com/mastra-ai/mastra/pull/10562))
+
+- Fix Azure Foundry rate limit handling for -1 values ([#10411](https://github.com/mastra-ai/mastra/pull/10411))
+
+- Fix model headers not being passed through gateway system ([#10564](https://github.com/mastra-ai/mastra/pull/10564))
+
+Previously, custom headers specified in `MastraModelConfig` were not being passed through the gateway system to model providers. This affected:
+- OpenRouter (preventing activity tracking with `HTTP-Referer` and `X-Title`)
+- Custom providers using custom URLs (headers not passed to `createOpenAICompatible`)
+- Custom gateway implementations (headers not available in `resolveLanguageModel`)
+
+Now headers are correctly passed through the entire gateway system:
+- Base `MastraModelGateway` interface updated to accept headers
+- `ModelRouterLanguageModel` passes headers from config to all gateways
+- OpenRouter receives headers for activity tracking
+- Custom URL providers receive headers via `createOpenAICompatible`
+- Custom gateways can access headers in their `resolveLanguageModel` implementation
+
+Example usage:
+
+```typescript
+// Works with OpenRouter
+const agent = new Agent({
+  name: 'my-agent',
+  instructions: 'You are a helpful assistant.',
+  model: {
+    id: 'openrouter/anthropic/claude-3-5-sonnet',
+    headers: {
+      'HTTP-Referer': 'https://myapp.com',
+      'X-Title': 'My Application',
+    },
+  },
+});
+
+// Also works with custom providers
+const customAgent = new Agent({
+  name: 'custom-agent',
+  instructions: 'You are a helpful assistant.',
+  model: {
+    id: 'custom-provider/model',
+    url: 'https://api.custom.com/v1',
+    apiKey: 'key',
+    headers: {
+      'X-Custom-Header': 'custom-value',
+    },
+  },
+});
+```

-
-- Added new abstraction over LanguageModelV2
+Fixes https://github.com/mastra-ai/mastra/issues/9760

-
+- fix(agent): persist messages before tool suspension ([#10542](https://github.com/mastra-ai/mastra/pull/10542))

-
+Fixes issues where thread and messages were not saved before suspension when tools require approval or call suspend() during execution. This caused conversation history to be lost if users refreshed during tool approval or suspension.

-
+**Backend changes (@mastra/core):**
+- Add assistant messages to messageList immediately after LLM execution
+- Flush messages synchronously before suspension to persist state
+- Create thread if it doesn't exist before flushing
+- Add metadata helpers to persist and remove tool approval state
+- Pass saveQueueManager and memory context through workflow for immediate persistence

-
+**Frontend changes (@mastra/react):**
+- Extract runId from pending approvals to enable resumption after refresh
+- Convert `pendingToolApprovals` (DB format) to `requireApprovalMetadata` (runtime format)
+- Handle both `dynamic-tool` and `tool-{NAME}` part types for approval state
+- Change runId from hardcoded `agentId` to unique `uuid()`

-
+**UI changes (@mastra/playground-ui):**
+- Handle tool calls awaiting approval in message initialization
+- Convert approval metadata format when loading initial messages

-
+Fixes #9745, #9906

--
-- Added new abstraction over LanguageModelV2
+- Fix race condition in parallel tool stream writes ([#10481](https://github.com/mastra-ai/mastra/pull/10481))

-
+Introduces a write queue to ToolStream to serialize access to the underlying stream, preventing writer locked errors

-
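The write-queue fix above serializes concurrent writes to a single stream. As a generic sketch of that pattern (not the actual ToolStream implementation), a promise chain can make sure each write waits for the previous one:

```typescript
// Generic promise-chain write queue: writes are serialized, so a single
// writer is never locked by two concurrent write() calls.
class QueuedWriter<T> {
  private queue: Promise<void> = Promise.resolve();

  constructor(private writer: WritableStreamDefaultWriter<T>) {}

  write(chunk: T): Promise<void> {
    const next = this.queue.then(() => this.writer.write(chunk));
    // Keep the chain alive even if one write rejects.
    this.queue = next.catch(() => {});
    return next;
  }
}
```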
+- Remove unneeded console warning when flushing messages and no threadId or saveQueueManager is found. ([#10542](https://github.com/mastra-ai/mastra/pull/10542))

--
+- Fixes GPT-5 reasoning which was failing on subsequent tool calls with the error: ([#10489](https://github.com/mastra-ai/mastra/pull/10489))

-
+```
+Item 'fc_xxx' of type 'function_call' was provided without its required 'reasoning' item: 'rs_xxx'
+```

--
+- Add optional includeRawChunks parameter to agent execution options, ([#10459](https://github.com/mastra-ai/mastra/pull/10459))
+allowing users to include raw chunks in stream output where supported
+by the model provider.

--
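A minimal sketch of opting into raw chunks on a stream call, based on the entry above; the `chunk.type === 'raw'` check is an assumption about how raw chunks are surfaced, not confirmed by the changelog.

```typescript
const stream = await agent.stream('Summarize the latest report', {
  includeRawChunks: true, // forwarded to providers that support raw chunks
});

for await (const chunk of stream.fullStream) {
  if (chunk.type === 'raw') {
    // Provider-specific payload, useful for debugging or custom telemetry.
    console.log(chunk);
  }
}
```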
+- When `mastra dev` runs, multiple processes can write to `provider-registry.json` concurrently (auto-refresh, syncGateways, syncGlobalCacheToLocal). This causes file corruption where the end of the JSON appears twice, making it unparseable. ([#10529](https://github.com/mastra-ai/mastra/pull/10529))

-
+The fix uses atomic writes via the write-to-temp-then-rename pattern. Instead of:

-
+```ts
+fs.writeFileSync(filePath, content, 'utf-8');
+```

-
-- Fixed iteration counter logic in `loop/network/index.ts` from `(inputData.iteration ? inputData.iteration : -1) + 1` to `(inputData.iteration ?? -1) + 1`
-- Changed initial iteration value from `0` to `-1` so first iteration correctly starts at 0
-- Added `checkIterations()` helper to validate iteration counting in all network tests
+We now do:

-
+```ts
+const tempPath = `${filePath}.${process.pid}.${Date.now()}.${randomSuffix}.tmp`;
+fs.writeFileSync(tempPath, content, 'utf-8');
+fs.renameSync(tempPath, filePath); // atomic on POSIX
+```

-
+`fs.rename()` is atomic on POSIX systems when both paths are on the same filesystem, so concurrent writes will each complete fully rather than interleaving.
+
+- Ensures that data chunks written via `writer.custom()` always bubble up directly to the top-level stream, even when nested in sub-agents. This allows tools to emit custom progress updates, metrics, and other data that can be consumed at any level of the agent hierarchy. ([#10523](https://github.com/mastra-ai/mastra/pull/10523))
+- **Added bubbling logic in sub-agent execution**: When sub-agents execute, data chunks (chunks with type starting with `data-`) are detected and written via `writer.custom()` instead of `writer.write()`, ensuring they bubble up directly without being wrapped in `tool-output` chunks.
+- **Added comprehensive tests**:
+- Test for `writer.custom()` with direct tool execution
+- Test for `writer.custom()` with sub-agent tools (nested execution)
+- Test for mixed usage of `writer.write()` and `writer.custom()` in the same tool
+
+When a sub-agent's tool uses `writer.custom()` to write data chunks, those chunks appear in the sub-agent's stream. The parent agent's execution logic now detects these chunks and uses `writer.custom()` to bubble them up directly, preserving their structure and making them accessible at the top level.
+
+This ensures that:
+- Data chunks from tools always appear directly in the stream (not wrapped)
+- Data chunks bubble up correctly through nested agent hierarchies
+- Regular chunks continue to be wrapped in `tool-output` as expected
+
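To illustrate the `writer.custom()` behavior described above, a rough sketch of a tool emitting a `data-` chunk; the tool id, input schema, chunk payload, and the assumption that the execute handler receives a `writer` are illustrative rather than taken from the changelog.

```typescript
import { createTool } from '@mastra/core/tools';
import { z } from 'zod';

export const importTool = createTool({
  id: 'import-records', // illustrative id
  description: 'Imports records and reports progress',
  inputSchema: z.object({ count: z.number() }),
  execute: async ({ context, writer }) => {
    for (let i = 0; i < context.count; i++) {
      // Chunks whose type starts with `data-` now bubble up to the
      // top-level stream even when this tool runs inside a sub-agent.
      await writer?.custom({ type: 'data-progress', done: i + 1, total: context.count });
    }
    return { imported: context.count };
  },
});
```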
+- Adds ability to create custom `MastraModelGateway`'s that can be added to the `Mastra` class instance under the `gateways` property. Giving you typescript autocompletion in any model picker string. ([#10535](https://github.com/mastra-ai/mastra/pull/10535))
+
+```typescript
+import { MastraModelGateway, type ProviderConfig } from '@mastra/core/llm';
+import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
+import type { LanguageModelV2 } from '@ai-sdk/provider';
+
+class MyCustomGateway extends MastraModelGateway {
+  readonly id = 'custom';
+  readonly name = 'My Custom Gateway';
+
+  async fetchProviders(): Promise<Record<string, ProviderConfig>> {
+    return {
+      'my-provider': {
+        name: 'My Provider',
+        models: ['model-1', 'model-2'],
+        apiKeyEnvVar: 'MY_API_KEY',
+        gateway: this.id,
+      },
+    };
+  }
+
+  buildUrl(modelId: string, envVars?: Record<string, string>): string {
+    return 'https://api.my-provider.com/v1';
+  }
+
+  async getApiKey(modelId: string): Promise<string> {
+    const apiKey = process.env.MY_API_KEY;
+    if (!apiKey) throw new Error('MY_API_KEY not set');
+    return apiKey;
+  }
+
+  async resolveLanguageModel({
+    modelId,
+    providerId,
+    apiKey,
+  }: {
+    modelId: string;
+    providerId: string;
+    apiKey: string;
+  }): Promise<LanguageModelV2> {
+    const baseURL = this.buildUrl(`${providerId}/${modelId}`);
+    return createOpenAICompatible({
+      name: providerId,
+      apiKey,
+      baseURL,
+    }).chatModel(modelId);
+  }
+}
+
+new Mastra({
+  gateways: {
+    myGateway: new MyCustomGateway(),
+  },
+});
+```

--
+- Support AI SDK voice models ([#10558](https://github.com/mastra-ai/mastra/pull/10558))

-
+Mastra now supports AI SDK's transcription and speech models directly in `CompositeVoice`, enabling seamless integration with a wide range of voice providers through the AI SDK ecosystem. This allows you to use models from OpenAI, ElevenLabs, Groq, Deepgram, LMNT, Hume, and many more for both speech-to-text (transcription) and text-to-speech capabilities.

-
+AI SDK models are automatically wrapped when passed to `CompositeVoice`, so you can mix and match AI SDK models with existing Mastra voice providers for maximum flexibility.

-##
+## Usage Example

-
+```typescript
+import { CompositeVoice } from '@mastra/core/voice';
+import { openai } from '@ai-sdk/openai';
+import { elevenlabs } from '@ai-sdk/elevenlabs';

-
+// Use AI SDK models directly with CompositeVoice
+const voice = new CompositeVoice({
+  input: openai.transcription('whisper-1'), // AI SDK transcription model
+  output: elevenlabs.speech('eleven_turbo_v2'), // AI SDK speech model
+});

-
+// Convert text to speech
+const audioStream = await voice.speak('Hello from AI SDK!');

-
+// Convert speech to text
+const transcript = await voice.listen(audioStream);
+console.log(transcript);
+```

-
+Fixes #9947

-- Fix
+- Fix network data step formatting in AI SDK stream transformation ([#10525](https://github.com/mastra-ai/mastra/pull/10525))

-
+Previously, network execution steps were not being tracked correctly in the AI SDK stream transformation. Steps were being duplicated rather than updated, and critical metadata like step IDs, iterations, and task information was missing or incorrectly structured.

 **Changes:**
--
--
-- Added
-
-
-
-- Revert download supported files for now ([`16a324f`](https://github.com/mastra-ai/mastra/commit/16a324f8c30a07d0d899bc2e4e7998c6b40a4cb6))
+- Enhanced step tracking in `AgentNetworkToAISDKTransformer` to properly maintain step state throughout execution lifecycle
+- Steps are now identified by unique IDs and updated in place rather than creating duplicates
+- Added proper iteration and task metadata to each step in the network execution flow
+- Fixed agent, workflow, and tool execution events to correctly populate step data
+- Updated network stream event types to include `networkId`, `workflowId`, and consistent `runId` tracking
+- Added test coverage for network custom data chunks with comprehensive validation

-
+This ensures the AI SDK correctly represents the full execution flow of agent networks with accurate step sequencing and metadata.

-- Fix
+- Fix generating provider-registry.json ([#10535](https://github.com/mastra-ai/mastra/pull/10535))

--
+- Fix message-list conversion issues when persisting messages before tool suspension: filter internal metadata fields (`__originalContent`) from UI messages, keep reasoning field empty for consistent cache keys during message deduplication, and only include providerMetadata on parts when defined. ([#10552](https://github.com/mastra-ai/mastra/pull/10552))

-
+- Fix agent.generate() to use model's doGenerate method instead of doStream ([#10572](https://github.com/mastra-ai/mastra/pull/10572))

-
-
-- update peerdeps ([`5ca1cca`](https://github.com/mastra-ai/mastra/commit/5ca1ccac61ffa7141e6d9fa8f22d3ad4d03bf5dc))
-
-- Fix workflow input property preservation after resume from snapshot ([#9527](https://github.com/mastra-ai/mastra/pull/9527))
-
-Ensure that when resuming a workflow from a snapshot, the input property is correctly set from the snapshot's context input rather than from resume data. This prevents the loss of original workflow input data during suspend/resume cycles.
-
-- Fix a bug where streaming didn't output the final chunk ([#9726](https://github.com/mastra-ai/mastra/pull/9726))
-
-- Fixes issue where clicking the reset button in the model picker would fail to restore the original LanguageModelV2 (or any other types) object that was passed during agent construction. ([#9487](https://github.com/mastra-ai/mastra/pull/9487))
-
-- Fix network routing agent smoothstreaming ([#9727](https://github.com/mastra-ai/mastra/pull/9727))
-
-- Updated dependencies [[`5ca1cca`](https://github.com/mastra-ai/mastra/commit/5ca1ccac61ffa7141e6d9fa8f22d3ad4d03bf5dc)]:
-- @mastra/schema-compat@0.11.7
-
-## 0.24.0-alpha.0
-
-### Patch Changes
+When calling `agent.generate()`, the model's `doGenerate` method is now correctly invoked instead of always using `doStream`. This aligns the non-streaming generation path with the intended behavior where providers can implement optimized non-streaming responses.

--
+- Updated dependencies [[`33a607a`](https://github.com/mastra-ai/mastra/commit/33a607a1f716c2029d4a1ff1603dd756129a33b3)]:
+- @mastra/schema-compat@0.11.8

-
-
-Ensure that when resuming a workflow from a snapshot, the input property is correctly set from the snapshot's context input rather than from resume data. This prevents the loss of original workflow input data during suspend/resume cycles.
-
-- Fix a bug where streaming didn't output the final chunk ([#9726](https://github.com/mastra-ai/mastra/pull/9726))
-
-- Fixes issue where clicking the reset button in the model picker would fail to restore the original LanguageModelV2 (or any other types) object that was passed during agent construction. ([#9487](https://github.com/mastra-ai/mastra/pull/9487))
-
-- Fix network routing agent smoothstreaming ([#9727](https://github.com/mastra-ai/mastra/pull/9727))
-
-- Updated dependencies [[`5ca1cca`](https://github.com/mastra-ai/mastra/commit/5ca1ccac61ffa7141e6d9fa8f22d3ad4d03bf5dc)]:
-- @mastra/schema-compat@0.11.7-alpha.0
-
-## 0.23.3
+## 0.24.6-alpha.0

 ### Patch Changes

--
+- Fix base64 encoded images with threads - issue #10480 ([#10566](https://github.com/mastra-ai/mastra/pull/10566))

-
+Fixed "Invalid URL" error when using base64 encoded images (without `data:` prefix) in agent calls with threads and resources. Raw base64 strings are now automatically converted to proper data URIs before being processed.

+**Changes:**
+- Updated `attachments-to-parts.ts` to detect and convert raw base64 strings to data URIs
+- Fixed `MessageList` image processing to handle raw base64 in two locations:
+- Image part conversion in `aiV4CoreMessageToV1PromptMessage`
+- File part to experimental_attachments conversion in `mastraDBMessageToAIV4UIMessage`
+- Added comprehensive tests for base64 images, data URIs, and HTTP URLs with threads
+
+**Breaking Change:** None - this is a bug fix that maintains backward compatibility while adding support for raw base64 strings.
+
+- SimpleAuth and improved CloudAuth ([#10569](https://github.com/mastra-ai/mastra/pull/10569))
+
+- Fixed OpenAI schema compatibility when using `agent.generate()` or `agent.stream()` with `structuredOutput`. ([#10454](https://github.com/mastra-ai/mastra/pull/10454))
+
+## Changes
+- **Automatic transformation**: Zod schemas are now automatically transformed for OpenAI strict mode compatibility when using OpenAI models (including reasoning models like o1, o3, o4)
+- **Optional field handling**: `.optional()` fields are converted to `.nullable()` with a transform that converts `null` → `undefined`, preserving optional semantics while satisfying OpenAI's strict mode requirements
+- **Preserves nullable fields**: Intentionally `.nullable()` fields remain unchanged
+- **Deep transformation**: Handles `.optional()` fields at any nesting level (objects, arrays, unions, etc.)
+- **JSON Schema objects**: Not transformed, only Zod schemas
+
+## Example
+
+```typescript
+const agent = new Agent({
+  name: 'data-extractor',
+  model: { provider: 'openai', modelId: 'gpt-4o' },
+  instructions: 'Extract user information',
+});
+
+const schema = z.object({
+  name: z.string(),
+  age: z.number().optional(),
+  deletedAt: z.date().nullable(),
+});
+
+// Schema is automatically transformed for OpenAI compatibility
+const result = await agent.generate('Extract: John, deleted yesterday', {
+  structuredOutput: { schema },
+});
+
+// Result: { name: 'John', age: undefined, deletedAt: null }
 ```
-export const supportWorkflow = mainWorkflow.then(nestedWorkflow).commit();
-```
-
-And a step in `nestedWorkflow` is supsended, you can now also resume it any of these ways:

-
-
-
-
-
+- deleteVectors, deleteFilter when upserting, updateVector filter (#10244) ([#10526](https://github.com/mastra-ai/mastra/pull/10526))
+
+- Fix generateTitle model type to accept AI SDK LanguageModelV2 ([#10567](https://github.com/mastra-ai/mastra/pull/10567))
+
+Updated the `generateTitle.model` config option to accept `MastraModelConfig` instead of `MastraLanguageModel`. This allows users to pass raw AI SDK `LanguageModelV2` models (e.g., `anthropic.languageModel('claude-3-5-haiku-20241022')`) directly without type errors.
+
+Previously, passing a standard `LanguageModelV2` would fail because `MastraLanguageModelV2` has different `doGenerate`/`doStream` return types. Now `MastraModelConfig` is used consistently across:
+- `memory/types.ts` - `generateTitle.model` config
+- `agent.ts` - `genTitle`, `generateTitleFromUserMessage`, `resolveTitleGenerationConfig`
+- `agent-legacy.ts` - `AgentLegacyCapabilities` interface
+
+- Fix message metadata not persisting when using simple message format. Previously, custom metadata passed in messages (e.g., `{role: 'user', content: 'text', metadata: {userId: '123'}}`) was not being saved to the database. This occurred because the CoreMessage conversion path didn't preserve metadata fields. ([#10571](https://github.com/mastra-ai/mastra/pull/10571))
+
+Now metadata is properly preserved for all message input formats:
+- Simple CoreMessage format: `{role, content, metadata}`
+- Full UIMessage format: `{role, content, parts, metadata}`
+- AI SDK v5 ModelMessage format with metadata
+
+Fixes #8556
+
+- feat: Composite auth implementation ([#10486](https://github.com/mastra-ai/mastra/pull/10486))
+
+- Fix requireApproval property being ignored for tools passed via toolsets, clientTools, and memoryTools parameters. The requireApproval flag now correctly propagates through all tool conversion paths, ensuring tools requiring approval will properly request user approval before execution. ([#10562](https://github.com/mastra-ai/mastra/pull/10562))
+
+- Fix Azure Foundry rate limit handling for -1 values ([#10411](https://github.com/mastra-ai/mastra/pull/10411))
+
+- Fix model headers not being passed through gateway system ([#10564](https://github.com/mastra-ai/mastra/pull/10564))
+
+Previously, custom headers specified in `MastraModelConfig` were not being passed through the gateway system to model providers. This affected:
+- OpenRouter (preventing activity tracking with `HTTP-Referer` and `X-Title`)
+- Custom providers using custom URLs (headers not passed to `createOpenAICompatible`)
+- Custom gateway implementations (headers not available in `resolveLanguageModel`)
+
+Now headers are correctly passed through the entire gateway system:
+- Base `MastraModelGateway` interface updated to accept headers
+- `ModelRouterLanguageModel` passes headers from config to all gateways
+- OpenRouter receives headers for activity tracking
+- Custom URL providers receive headers via `createOpenAICompatible`
+- Custom gateways can access headers in their `resolveLanguageModel` implementation
+
+Example usage:
+
+```typescript
+// Works with OpenRouter
+const agent = new Agent({
+  name: 'my-agent',
+  instructions: 'You are a helpful assistant.',
+  model: {
+    id: 'openrouter/anthropic/claude-3-5-sonnet',
+    headers: {
+      'HTTP-Referer': 'https://myapp.com',
+      'X-Title': 'My Application',
+    },
+  },
+});
+
+// Also works with custom providers
+const customAgent = new Agent({
+  name: 'custom-agent',
+  instructions: 'You are a helpful assistant.',
+  model: {
+    id: 'custom-provider/model',
+    url: 'https://api.custom.com/v1',
+    apiKey: 'key',
+    headers: {
+      'X-Custom-Header': 'custom-value',
+    },
+  },
+});
 ```

-
+Fixes https://github.com/mastra-ai/mastra/issues/9760

-
-run.resume({
-  step: "nestedWorkflow", // just the nested workflow step/step id
-  //other resume params
-})
-```
+- fix(agent): persist messages before tool suspension ([#10542](https://github.com/mastra-ai/mastra/pull/10542))

-
+Fixes issues where thread and messages were not saved before suspension when tools require approval or call suspend() during execution. This caused conversation history to be lost if users refreshed during tool approval or suspension.

-
+**Backend changes (@mastra/core):**
+- Add assistant messages to messageList immediately after LLM execution
+- Flush messages synchronously before suspension to persist state
+- Create thread if it doesn't exist before flushing
+- Add metadata helpers to persist and remove tool approval state
+- Pass saveQueueManager and memory context through workflow for immediate persistence

-
+**Frontend changes (@mastra/react):**
+- Extract runId from pending approvals to enable resumption after refresh
+- Convert `pendingToolApprovals` (DB format) to `requireApprovalMetadata` (runtime format)
+- Handle both `dynamic-tool` and `tool-{NAME}` part types for approval state
+- Change runId from hardcoded `agentId` to unique `uuid()`

-
+**UI changes (@mastra/playground-ui):**
+- Handle tool calls awaiting approval in message initialization
+- Convert approval metadata format when loading initial messages

-
+Fixes #9745, #9906

-
+- Fix race condition in parallel tool stream writes ([#10481](https://github.com/mastra-ai/mastra/pull/10481))

-
-export const supportWorkflow = mainWorkflow.then(nestedWorkflow).commit();
-```
+Introduces a write queue to ToolStream to serialize access to the underlying stream, preventing writer locked errors

-
+- Remove unneeded console warning when flushing messages and no threadId or saveQueueManager is found. ([#10542](https://github.com/mastra-ai/mastra/pull/10542))

-
-run.resume({
-  step: "nestedWorkflow.suspendedStep", //chained nested workflow step id and suspended step id
-  //other resume params
-})
-```
-
-OR
+- Fixes GPT-5 reasoning which was failing on subsequent tool calls with the error: ([#10489](https://github.com/mastra-ai/mastra/pull/10489))

 ```
-
-  step: "nestedWorkflow", // just the nested workflow step/step id
-  //other resume params
-})
+Item 'fc_xxx' of type 'function_call' was provided without its required 'reasoning' item: 'rs_xxx'
 ```

--
-
-
-
-## 0.23.2
-
-### Patch Changes
+- Add optional includeRawChunks parameter to agent execution options, ([#10459](https://github.com/mastra-ai/mastra/pull/10459))
+allowing users to include raw chunks in stream output where supported
+by the model provider.

--
+- When `mastra dev` runs, multiple processes can write to `provider-registry.json` concurrently (auto-refresh, syncGateways, syncGlobalCacheToLocal). This causes file corruption where the end of the JSON appears twice, making it unparseable. ([#10529](https://github.com/mastra-ai/mastra/pull/10529))

-
+The fix uses atomic writes via the write-to-temp-then-rename pattern. Instead of:

-
-
-
-
-- Detect thenable objects returned by AI model providers ([#9414](https://github.com/mastra-ai/mastra/pull/9414))
-
-- Bug fix: Use input processors that are passed in generate or stream agent options rather than always defaulting to the processors set on the Agent class. ([#9422](https://github.com/mastra-ai/mastra/pull/9422))
-
-- Fix tool input validation to use schema-compat transformed schemas ([#9360](https://github.com/mastra-ai/mastra/pull/9360))
-
-Previously, tool input validation used the original Zod schema while the LLM received a schema-compat transformed version. This caused validation failures when LLMs (like OpenAI o3 or Claude 3.5 Haiku) sent arguments matching the transformed schema but not the original.
-
-For example:
-- OpenAI o3 reasoning models convert `.optional()` to `.nullable()`, sending `null` values
-- Claude 3.5 Haiku strips `min`/`max` string constraints, sending shorter strings
-- Validation would reject these valid responses because it checked against the original schema
-
-The fix ensures validation uses the same schema-compat processed schema that was sent to the LLM, eliminating this mismatch.
-
-- Add import for WritableStream in execution-engine and dedupe llm.getModel in agent.ts ([#9341](https://github.com/mastra-ai/mastra/pull/9341))
+```ts
+fs.writeFileSync(filePath, content, 'utf-8');
+```

-
+We now do:

-
-
+```ts
+const tempPath = `${filePath}.${process.pid}.${Date.now()}.${randomSuffix}.tmp`;
+fs.writeFileSync(tempPath, content, 'utf-8');
+fs.renameSync(tempPath, filePath); // atomic on POSIX
+```

-
+`fs.rename()` is atomic on POSIX systems when both paths are on the same filesystem, so concurrent writes will each complete fully rather than interleaving.

--
--
--
+- Ensures that data chunks written via `writer.custom()` always bubble up directly to the top-level stream, even when nested in sub-agents. This allows tools to emit custom progress updates, metrics, and other data that can be consumed at any level of the agent hierarchy. ([#10523](https://github.com/mastra-ai/mastra/pull/10523))
+- **Added bubbling logic in sub-agent execution**: When sub-agents execute, data chunks (chunks with type starting with `data-`) are detected and written via `writer.custom()` instead of `writer.write()`, ensuring they bubble up directly without being wrapped in `tool-output` chunks.
+- **Added comprehensive tests**:
+- Test for `writer.custom()` with direct tool execution
+- Test for `writer.custom()` with sub-agent tools (nested execution)
+- Test for mixed usage of `writer.write()` and `writer.custom()` in the same tool

--
+When a sub-agent's tool uses `writer.custom()` to write data chunks, those chunks appear in the sub-agent's stream. The parent agent's execution logic now detects these chunks and uses `writer.custom()` to bubble them up directly, preserving their structure and making them accessible at the top level.

+This ensures that:
+- Data chunks from tools always appear directly in the stream (not wrapped)

-...
+... 5345 more lines hidden. See full changelog in package directory.